* uboot settings

 set ipaddr 192.168.2.3
 set netmask 255.255.255.0
 set serverip 192.168.2.2
 set imagefile ryu/uImage
 set devicetreefile ryu/shark-ryu.dtb
 set nfspath /srv/nfs/root-shark
 set bootcmd_tftpnfs_static 'tftp 800000 ${imagefile}; tftp 7f0000 ${devicetreefile}; set bootargs ${linux_console} root=/dev/nfs nfsroot=${serverip}:${nfspath} rw ip=${ipaddr}; mw f0000b00 ${psc_cfg}; bootm 800000 - 7f0000'
 set bootcmd 'run bootcmd_tftpnfs_static'

* Questions
** Why does a CRC modification alone not produce an error, and why is it not returned by cangw -L?
* Kernel oops
** Linux-2.6.33.7-00005-ge2f49b5
Unable to handle kernel paging request for instruction fetch
Faulting instruction address: 0x8d56acc4
Oops: Kernel access of bad area, sig: 11 [#1]
PREEMPT Shark
Modules linked in:
NIP: 8d56acc4 LR: c0260208 CTR: 8d56acc7
REGS: c03f3bc0 TRAP: 0400   Not tainted  (2.6.33.7-00005-ge2f49b5)
MSR: 20009032 <EE,ME,IR,DR>  CR: 24008028  XER: 0000005f
TASK = c03d5478[0] 'swapper' THREAD: c03f2000
GPR00: 8d56acc7 c03f3c70 c03d5478 c7990160 c78d54ad 00000048 c79901a4 00000002
GPR08: 000000e0 00000000 00000001 00000002 44000022 7f33ff4e 80000000 00000042
GPR16: 00000008 20000000 c909e900 00000001 c03f3d78 c79326e0 00000008 c799dc1c
GPR24: c7976800 c78d54f9 c7949828 c7a06620 c03f2000 c7990160 c78d54ad c03f3c70
NIP [8d56acc4] 0x8d56acc4
LR [c0260208] dev_queue_xmit+0x2f0/0x588
Call Trace:
[c03f3c70] [c025ff74] dev_queue_xmit+0x5c/0x588 (unreliable)
[c03f3ca0] [c02d62d8] can_send+0x9c/0x1a0
[c03f3cc0] [c02d97a8] can_can_gw_rcv+0x108/0x164
[c03f3cf0] [c02d52ac] can_rcv_filter+0x240/0x2e8
[c03f3d10] [c02d541c] can_rcv+0xc8/0x140
[c03f3d30] [c025e968] netif_receive_skb+0x2ac/0x350
[c03f3d70] [c020a230] mscan_rx_poll+0x1c0/0x464
[c03f3dd0] [c025f428] net_rx_action+0x104/0x22c
[c03f3e20] [c00345e0] __do_softirq+0x10c/0x21c
[c03f3e70] [c00075d0] do_softirq+0x58/0x74
[c03f3e80] [c0034300] irq_exit+0x8c/0xbc
[c03f3e90] [c00076c8] do_IRQ+0xdc/0x188
[c03f3ec0] [c0014db0] ret_from_except+0x0/0x14
--- Exception: 501 at cpu_idle+0x104/0x114
    LR = cpu_idle+0x104/0x114
[c03f3f80] [c000aefc] cpu_idle+0x68/0x114 (unreliable)
[c03f3fa0] [c0003f40] rest_init+0xa4/0xc4
[c03f3fc0] [c039f918] start_kernel+0x2e4/0x2fc
[c03f3ff0] [00003438] 0x3438
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
Kernel panic - not syncing: Fatal exception in interrupt
Call Trace:
[c03f3ab0] [c0009cdc] show_stack+0x70/0x1d4 (unreliable)
[c03f3b00] [c02ff5ec] dump_stack+0x2c/0x44
[c03f3b10] [c02ff6b0] panic+0xac/0x19c
[c03f3b60] [c0011df8] die+0x188/0x1a4
[c03f3b90] [c00165c8] bad_page_fault+0x90/0xe0
[c03f3bb0] [c0014bb8] handle_page_fault+0x7c/0x80
--- Exception: 400 at 0x8d56acc4
    LR = dev_queue_xmit+0x2f0/0x588
[c03f3c70] [c025ff74] dev_queue_xmit+0x5c/0x588 (unreliable)
[c03f3ca0] [c02d62d8] can_send+0x9c/0x1a0
[c03f3cc0] [c02d97a8] can_can_gw_rcv+0x108/0x164
[c03f3cf0] [c02d52ac] can_rcv_filter+0x240/0x2e8
[c03f3d10] [c02d541c] can_rcv+0xc8/0x140
[c03f3d30] [c025e968] netif_receive_skb+0x2ac/0x350
[c03f3d70] [c020a230] mscan_rx_poll+0x1c0/0x464
[c03f3dd0] [c025f428] net_rx_action+0x104/0x22c
[c03f3e20] [c00345e0] __do_softirq+0x10c/0x21c
[c03f3e70] [c00075d0] do_softirq+0x58/0x74
[c03f3e80] [c0034300] irq_exit+0x8c/0xbc
[c03f3e90] [c00076c8] do_IRQ+0xdc/0x188
[c03f3ec0] [c0014db0] ret_from_except+0x0/0x14
--- Exception: 501 at cpu_idle+0x104/0x114
    LR = cpu_idle+0x104/0x114
[c03f3f80] [c000aefc] cpu_idle+0x68/0x114 (unreliable)
[c03f3fa0] [c0003f40] rest_init+0xa4/0xc4
[c03f3fc0] [c039f918] start_kernel+0x2e4/0x2fc
[c03f3ff0] [00003438] 0x3438
Rebooting in 180 seconds..

** Current kernel
Unable to handle kernel paging request for instruction fetch
Faulting instruction address: 0x8cd6acc4
Oops: Kernel access of bad area, sig: 11 [#1]
PREEMPT Shark
Modules linked in:
NIP: 8cd6acc4 LR: c025fdac CTR: 8cd6acc7
REGS: c03f3bd0 TRAP: 0400   Not tainted  (2.6.33.7-00005-ge2f49b5)
MSR: 20009032 <EE,ME,IR,DR>  CR: 22008028  XER: 0000005f
TASK = c03d6478[0] 'swapper' THREAD: c03f2000
GPR00: 8cd6acc7 c03f3c80 c03d6478 c79fec00 c78cd4ad 00000048 c79fec44 00000002
GPR08: 000000e0 00000000 00000001 c03f3c80 42000022 7f33ff4e 80000000 00000042
GPR16: 00000008 20000000 c909e900 00000001 c03f3d88 c79386e0 00000008 c79fe51c
GPR24: 0000000c c78cd4f9 c7975800 c7949828 c7a33d24 c79fec00 c78cd4ad c03f3c80
NIP [8cd6acc4] 0x8cd6acc4
LR [c025fdac] dev_queue_xmit+0xec/0x588
Call Trace:
[c03f3c80] [c025fd68] dev_queue_xmit+0xa8/0x588 (unreliable)
[c03f3cb0] [c02d5128] can_send+0x9c/0x1a0
[c03f3cd0] [c02d85ec] can_can_gw_rcv+0x108/0x164
[c03f3d00] [c02d3fdc] can_rcv_filter+0xf8/0x2e8
[c03f3d20] [c02d4294] can_rcv+0xc8/0x140
[c03f3d40] [c025e6f8] netif_receive_skb+0x2a8/0x34c
[c03f3d80] [c020a774] mscan_rx_poll+0x1c0/0x464
[c03f3de0] [c025f1b8] net_rx_action+0x104/0x238
[c03f3e30] [c00368c8] __do_softirq+0x128/0x244
[c03f3e80] [c00075e0] do_softirq+0x64/0x80
[c03f3e90] [c00365d0] irq_exit+0x98/0xc4
[c03f3ea0] [c00076d8] do_IRQ+0xdc/0x188
[c03f3ed0] [c0014d40] ret_from_except+0x0/0x14
[c03f3f90] [c000aed8] cpu_idle+0x68/0x10c (unreliable)
--- Exception: 501 at cpu_idle+0xfc/0x10c
    LR = cpu_idle+0xfc/0x10c
[c03f3fb0] [c0003f34] rest_init+0x98/0xc4
[c03f3fc0] [c039f914] start_kernel+0x2e0/0x2f8
[c03f3ff0] [00003438] 0x3438
Instruction dump:
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX
Kernel panic - not syncing: Fatal exception in interrupt
Call Trace:
[c03f3ac0] [c0009cc0] show_stack+0x70/0x1d4 (unreliable)
[c03f3b10] [c02fde74] dump_stack+0x2c/0x44
[c03f3b20] [c02fdf30] panic+0xa4/0x19c
[c03f3b70] [c0011d90] die+0x184/0x1a0
--- Exception: c03f3d88 at 0x42000022
    LR = 0x8
[c03f3ba0] [c0016558] bad_page_fault+0x90/0xe0 (unreliable)
[c03f3bc0] [c0014b48] handle_page_fault+0x7c/0x80
--- Exception: 400 at 0x8cd6acc4
    LR = dev_queue_xmit+0xec/0x588
[c03f3c80] [c025fd68] dev_queue_xmit+0xa8/0x588 (unreliable)
[c03f3cb0] [c02d5128] can_send+0x9c/0x1a0
[c03f3cd0] [c02d85ec] can_can_gw_rcv+0x108/0x164
[c03f3d00] [c02d3fdc] can_rcv_filter+0xf8/0x2e8
[c03f3d20] [c02d4294] can_rcv+0xc8/0x140
[c03f3d40] [c025e6f8] netif_receive_skb+0x2a8/0x34c
[c03f3d80] [c020a774] mscan_rx_poll+0x1c0/0x464
[c03f3de0] [c025f1b8] net_rx_action+0x104/0x238
[c03f3e30] [c00368c8] __do_softirq+0x128/0x244
[c03f3e80] [c00075e0] do_softirq+0x64/0x80
[c03f3e90] [c00365d0] irq_exit+0x98/0xc4
[c03f3ea0] [c00076d8] do_IRQ+0xdc/0x188
[c03f3ed0] [c0014d40] ret_from_except+0x0/0x14
--- Exception: 501 at cpu_idle+0xfc/0x10c
    LR = cpu_idle+0xfc/0x10c
[c03f3f90] [c000aed8] cpu_idle+0x68/0x10c (unreliable)
[c03f3fb0] [c0003f34] rest_init+0x98/0xc4
[c03f3fc0] [c039f914] start_kernel+0x2e0/0x2f8
[c03f3ff0] [00003438] 0x3438


*** Disassembly
        if (q->enqueue) {
c025fd54:       80 1e 00 00     lwz     r0,0(r30)
c025fd58:       2f 80 00 00     cmpwi   cr7,r0,0
c025fd5c:       41 9e 01 94     beq-    cr7,c025fef0 <dev_queue_xmit+0x230>
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
c025fd60:       38 60 00 01     li      r3,1
c025fd64:       4b dc 47 cd     bl      c0024530 <add_preempt_count>
c025fd68:       80 1e 00 4c     lwz     r0,76(r30) !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
{
        spinlock_t *root_lock = qdisc_lock(q);
        int rc;

        spin_lock(root_lock);
        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
c025fd6c:       3b 3e 00 4c     addi    r25,r30,76
c025fd70:       70 09 00 04     andi.   r9,r0,4
c025fd74:       40 82 01 f0     bne-    c025ff64 <dev_queue_xmit+0x2a4>

*** Corresponding C code
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct net_device *dev,
                                 struct netdev_queue *txq)
{
        spinlock_t *root_lock = qdisc_lock(q);
        int rc;

        spin_lock(root_lock);

** 2.6.36.1-00006-g2e11adb
Faulting instruction address: 0xc0262358
Oops: Kernel access of bad area, sig: 11 [#1]
PREEMPT Shark
last sysfs file: /sys/devices/lpb.0/fc000000.flash/mtd/mtd2ro/dev
Modules linked in:
NIP: c0262358 LR: c026218c CTR: 00000000
REGS: c7ffbd20 TRAP: 0300   Not tainted  (2.6.36.1-00006-g2e11adb)
MSR: 00009032 <EE,ME,IR,DR>  CR: 42008024  XER: 0000005f
DAR: fffffffe, DSISR: 20000000
TASK = c03e44d0[0] 'swapper' THREAD: c0400000
GPR00: c7939560 c7ffbdd0 c03e44d0 00000100 00000000 00000060 c798bb64 00000000
GPR08: 000000e8 00000002 00000001 00000002 42000022 7f33ff4e 80000000 00000042
GPR16: 00000008 20000000 c909e900 00000001 c7ffbef8 c0404a84 c0404a80 c78b4000
GPR24: 00000000 c798bb20 c79395c0 c7a77e20 c796f800 c798bb20 fffffffe c7ffbdd0
NIP [c0262358] dev_queue_xmit+0x200/0x4f0
LR [c026218c] dev_queue_xmit+0x34/0x4f0
Call Trace:
[c7ffbdd0] [c026218c] dev_queue_xmit+0x34/0x4f0 (unreliable)
[c7ffbe00] [c02dd1f4] can_send+0x9c/0x1a0
[c7ffbe20] [c02e07a8] can_can_gw_rcv+0x108/0x164
[c7ffbe50] [c02dc224] can_rcv_filter+0xf8/0x2e8
[c7ffbe70] [c02dc4dc] can_rcv+0xc8/0x140
[c7ffbe90] [c02606f0] __netif_receive_skb+0x2cc/0x338
[c7ffbed0] [c0260934] netif_receive_skb+0x5c/0x98
[c7ffbef0] [c020d72c] mscan_rx_poll+0x1c0/0x454
[c7ffbf50] [c0260c64] net_rx_action+0x104/0x230
[c7ffbfa0] [c0031350] __do_softirq+0x118/0x22c
[c7ffbff0] [c0011704] call_do_softirq+0x14/0x24
[c0401e60] [c0006bdc] do_softirq+0x84/0xa8
[c0401e80] [c0031074] irq_exit+0x88/0xb4
[c0401e90] [c0006d60] do_IRQ+0xe0/0x234
[c0401ec0] [c00123d4] ret_from_except+0x0/0x14
--- Exception: 501 at cpu_idle+0xfc/0x10c
    LR = cpu_idle+0xfc/0x10c
[c0401f80] [c000a7a8] cpu_idle+0x68/0x10c (unreliable)
[c0401fa0] [c0003ec0] rest_init+0x9c/0xbc
[c0401fc0] [c03ad91c] start_kernel+0x2c0/0x2d8
[c0401ff0] [00003438] 0x3438
Instruction dump:
41920198 817e000c 2f8b0000 419c018c 55693032 55602036 7ca04850 5569043e
b13d0074 801c01c8 7f402a14 83da0004 <801e0000> 2f800000 409efe88 801c00d8
Kernel panic - not syncing: Fatal exception in interrupt
Call Trace:
[c7ffbc00] [c00095dc] show_stack+0xb0/0x1d4 (unreliable)
[c7ffbc50] [c030664c] dump_stack+0x2c/0x44
[c7ffbc60] [c0306720] panic+0xbc/0x1fc
[c7ffbcc0] [c000f34c] die+0x1b8/0x1e8
--- Exception: c7ffbef8 at 0x42000022
    LR = 0x8
[c7ffbcf0] [c0013b8c] bad_page_fault+0x90/0xe0 (unreliable)
[c7ffbd10] [c00121dc] handle_page_fault+0x7c/0x80
--- Exception: 300 at dev_queue_xmit+0x200/0x4f0
    LR = dev_queue_xmit+0x34/0x4f0
[c7ffbe00] [c02dd1f4] can_send+0x9c/0x1a0
[c7ffbe20] [c02e07a8] can_can_gw_rcv+0x108/0x164
[c7ffbe50] [c02dc224] can_rcv_filter+0xf8/0x2e8
[c7ffbe70] [c02dc4dc] can_rcv+0xc8/0x140
[c7ffbe90] [c02606f0] __netif_receive_skb+0x2cc/0x338
[c7ffbed0] [c0260934] netif_receive_skb+0x5c/0x98
[c7ffbef0] [c020d72c] mscan_rx_poll+0x1c0/0x454
[c7ffbf50] [c0260c64] net_rx_action+0x104/0x230
[c7ffbfa0] [c0031350] __do_softirq+0x118/0x22c
[c7ffbff0] [c0011704] call_do_softirq+0x14/0x24
[c0401e60] [c0006bdc] do_softirq+0x84/0xa8
[c0401e80] [c0031074] irq_exit+0x88/0xb4
[c0401e90] [c0006d60] do_IRQ+0xe0/0x234
[c0401ec0] [c00123d4] ret_from_except+0x0/0x14
--- Exception: 501 at cpu_idle+0xfc/0x10c
    LR = cpu_idle+0xfc/0x10c
[c0401f80] [c000a7a8] cpu_idle+0x68/0x10c (unreliable)
[c0401fa0] [c0003ec0] rest_init+0x9c/0xbc
[c0401fc0] [c03ad91c] start_kernel+0x2c0/0x2d8
[c0401ff0] [00003438] 0x3438

* write returns ENOBUFS
The reason is the queueing discipline: the queue is full, so
pfifo_fast_enqueue() returns a failure. This failure is propagated back
up through qdisc_enqueue_root(), __dev_xmit_skb(), dev_queue_xmit() and
can_send(), where net_xmit_errno() converts it to -ENOBUFS.

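From user space this shows up as write() on the CAN socket failing with
errno set to ENOBUFS. Below is a minimal sketch of how a sender can deal
with it, using the standard SocketCAN raw-socket API; the interface name
"can0" and the 1 ms back-off/retry policy are illustrative assumptions,
not taken from the benchmark sources.

/* Sketch: send one CAN frame and retry when the interface's qdisc queue
 * is full (write() fails with ENOBUFS). */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
        struct sockaddr_can addr = { .can_family = AF_CAN };
        struct ifreq ifr;
        struct can_frame cf = { .can_id = 0x123, .can_dlc = 2,
                                .data = { 0xde, 0xad } };
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        if (s < 0) { perror("socket"); return 1; }
        memset(&ifr, 0, sizeof(ifr));
        strcpy(ifr.ifr_name, "can0");
        if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) { perror("ioctl"); return 1; }
        addr.can_ifindex = ifr.ifr_ifindex;
        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("bind"); return 1;
        }

        for (;;) {
                ssize_t n = write(s, &cf, sizeof(cf));
                if (n == (ssize_t)sizeof(cf))
                        break;                  /* frame queued for transmission */
                if (n < 0 && errno == ENOBUFS) {
                        usleep(1000);           /* qdisc queue full - back off, retry */
                        continue;
                }
                perror("write");
                return 1;
        }
        close(s);
        return 0;
}

Whether retrying like this or counting the frame as dropped is the right
policy depends on what the benchmark is supposed to measure.
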
** See the following trace

# tracer: function_graph
#
# CPU  DURATION                  FUNCTION CALLS
# |     |   |                     |   |   |   |
 1)               |  can_send() {
 1)               |    dev_queue_xmit() {
 1)               |      rt_spin_lock() {
 1)   0.894 us    |        __might_sleep();
 1)   2.750 us    |      }
 1)               |      pfifo_fast_enqueue() {
 1)               |        kfree_skb() {
 1)               |          __kfree_skb() {
 1)               |            skb_release_head_state() {
 1)               |              sock_wfree() {
 1)               |                sock_def_write_space() {
 1)               |                  rt_read_lock() {
 1)               |                    __rt_spin_lock() {
 1)   0.677 us    |                      __might_sleep();
 1)   2.386 us    |                    }
 1)   4.150 us    |                  }
 1)               |                  rt_read_unlock() {
 1)   0.918 us    |                    __rt_spin_unlock();
 1)   2.644 us    |                  }
 1)   9.375 us    |                }
 1) + 10.999 us   |              }
 1) + 12.708 us   |            }
 1)               |            skb_release_data() {
 1)               |              kfree() {
 1)               |                _slab_irq_disable() {
 1)               |                  rt_spin_lock() {
 1)   0.849 us    |                    __might_sleep();
 1)   2.588 us    |                  }
 1)   4.242 us    |                }
 1)   0.864 us    |                __cache_free();
 1)   0.888 us    |                rt_spin_unlock();
 1)   9.456 us    |              }
 1) + 11.185 us   |            }
 1)               |            kmem_cache_free() {
 1)               |              _slab_irq_disable() {
 1)               |                rt_spin_lock() {
 1)   0.812 us    |                  __might_sleep();
 1)   2.542 us    |                }
 1)   4.240 us    |              }
 1)   0.813 us    |              __cache_free();
 1)   0.894 us    |              rt_spin_unlock();
 1)   9.329 us    |            }
 1) + 36.444 us   |          }
 1) + 38.136 us   |        }
 1) + 39.974 us   |      }
 1)               |      /* qdisc_enqueue_root = 1 */
 1)               |      __qdisc_run() {
 1)               |        sch_direct_xmit() {
 1)   0.855 us    |          rt_spin_unlock();
 1)               |          rt_spin_lock() {
 1)   0.827 us    |            __might_sleep();
 1)   2.521 us    |          }
 1)               |          dev_hard_start_xmit() {
 1)               |            sja1000_start_xmit() {
 1)   1.646 us    |              kvaser_pci_write_reg();
 1)   1.460 us    |              kvaser_pci_write_reg();
 1)   1.691 us    |              kvaser_pci_write_reg();
 1)   1.450 us    |              kvaser_pci_write_reg();
 1)   1.416 us    |              kvaser_pci_write_reg();
 1)               |              can_put_echo_skb() {
 1)               |                sock_wfree() {
 1)               |                  sock_def_write_space() {
 1)               |                    rt_read_lock() {
 1)               |                      __rt_spin_lock() {
 1)   0.939 us    |                        __might_sleep();
 1)   2.699 us    |                      }
 1)   4.337 us    |                    }
 1)               |                    rt_read_unlock() {
 1)   0.844 us    |                      __rt_spin_unlock();
 1)   2.507 us    |                    }
 1)   9.242 us    |                  }
 1) + 11.047 us   |                }
 1) + 12.791 us   |              }
 1)               |              sja1000_write_cmdreg() {
 1)               |                rt_spin_lock() {
 1)   0.802 us    |                  __might_sleep();
 1)   2.532 us    |                }
 1)   1.481 us    |                kvaser_pci_write_reg();
 1)   2.155 us    |                kvaser_pci_read_reg();
 1)   0.843 us    |                rt_spin_unlock();
 1) + 11.100 us   |              }
 1) + 38.263 us   |            }
 1)               |            /* ops->ndo_start_xmit(skb, dev) = 0 */
 1) + 41.508 us   |          }
 1)               |          /* dev_hard_start_xmit(skb, dev) = 0 */
 1)   0.884 us    |          rt_spin_unlock();
 1)               |          rt_spin_lock() {
 1)   0.817 us    |            __might_sleep();
 1)   2.583 us    |          }
 1)               |          /* qdisc_len = 9 */
 1)               |          /* sch_direct_xmit returns 0 */
 1) + 57.502 us   |        }
 1) + 59.288 us   |      }
 1)   0.877 us    |      rt_spin_unlock();
 1)               |      /* __dev_xmit_skb = 1 */
 1) ! 111.265 us  |    }
 1)               |  /* dev_queue_xmit = 1 */
 1)               |  /* net_xmit_errno = -105 */
 1)   0.956 us    |    kfree_skb();
 1) ! 117.340 us  |  }
 1)               |  /* write returned -1 */

** The trace was generated by kernel v2.6.33.7-rt29-4-g92487e4 with the following patch applied:

diff --git a/net/can/af_can.c b/net/can/af_can.c
index 4d8479d7..c91968d 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -344,8 +344,11 @@ int can_send(struct sk_buff *skb, int loop)
 
        /* send to netdevice */
        err = dev_queue_xmit(skb);
-       if (err > 0)
+       trace_printk("dev_queue_xmit = %d\n", err);
+       if (err > 0) {
                err = net_xmit_errno(err);
+               trace_printk("net_xmit_errno = %d\n", err);
+       }
 
        if (err) {
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
diff --git a/net/core/dev.c b/net/core/dev.c
index 7a5412e..10ca9b4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1828,6 +1828,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        skb_dst_drop(skb);
 
                rc = ops->ndo_start_xmit(skb, dev);
+               trace_printk("ops->ndo_start_xmit(skb, dev) = %d\n", rc);
+
                if (rc == NETDEV_TX_OK)
                        txq_trans_update(txq);
                /*
@@ -1854,6 +1856,7 @@ gso:
                skb->next = nskb->next;
                nskb->next = NULL;
                rc = ops->ndo_start_xmit(nskb, dev);
+               trace_printk("gso: ops->ndo_start_xmit(skb, dev) = %d\n", rc);
                if (unlikely(rc != NETDEV_TX_OK)) {
                        if (rc & ~NETDEV_TX_MASK)
                                goto out_kfree_gso_skb;
@@ -1949,6 +1952,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        spin_lock(root_lock);
        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
                kfree_skb(skb);
+               trace_printk("QDISC_STATE_DEACTIVATED\n");
+
                rc = NET_XMIT_DROP;
        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
                   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
@@ -1966,6 +1971,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                rc = NET_XMIT_SUCCESS;
        } else {
                rc = qdisc_enqueue_root(skb, q);
+               trace_printk("qdisc_enqueue_root = %d\n", rc);
                qdisc_run(q);
        }
        spin_unlock(root_lock);
@@ -2047,6 +2053,7 @@ gso:
 #endif
        if (q->enqueue) {
                rc = __dev_xmit_skb(skb, q, dev, txq);
+               trace_printk("__dev_xmit_skb = %d\n", rc);
                goto out;
        }
 
@@ -2072,6 +2079,8 @@ gso:
 
                        if (!netif_tx_queue_stopped(txq)) {
                                rc = dev_hard_start_xmit(skb, dev, txq);
+                               trace_printk("dev_hard_start_xmit = %d\n", rc);
+
                                if (dev_xmit_complete(rc)) {
                                        HARD_TX_UNLOCK(dev, txq);
                                        goto out;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index bf7f50e..aea0833 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -125,6 +125,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        HARD_TX_LOCK(dev, txq);
        if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
+       trace_printk("dev_hard_start_xmit(skb, dev) = %d\n", ret);
 
        HARD_TX_UNLOCK(dev, txq);
 
@@ -133,9 +134,11 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
+               trace_printk("qdisc_len = %d\n", ret);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
+               trace_printk("handle_dev_cpu_collision = %d\n", ret);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
@@ -143,12 +146,13 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                               dev->name, ret, q->q.qlen);
 
                ret = dev_requeue_skb(skb, q);
+               trace_printk("dev_requeue_skb = %d\n", ret);
        }
 
        if (ret && (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq)))
                ret = 0;
-
+       trace_printk("%s returns %d\n", __FUNCTION__, ret);
        return ret;
 }
