Source: rtime.felk.cvut.cz Git — can-benchmark.git, blob doc/notes.org
at commit 254ada53bd10d45b2c72ba56f477fa66be315c49
1 * uboot settings
2
3  set ipaddr 192.168.2.3
4  set netmask 255.255.255.0
5  set serverip 192.168.2.2 
6  set imagefile ryu/uImage
7  set devicetreefile ryu/shark-ryu.dtb
8  set nfspath /srv/nfs/root-shark
9  set bootcmd_tftpnfs_static 'tftp 800000 ${imagefile}; tftp 7f0000 ${devicetreefile}; set bootargs ${linux_console} root=/dev/nfs nfsroot=${serverip}:${nfspath} rw ip=${ipaddr}; mw f0000b00 ${psc_cfg}; bootm 800000 - 7f0000'
10  set bootcmd 'run bootcmd_tftpnfs_static'
11
12 * Questions
13 ** Why does a CRC modification alone not produce an error, and why is it not returned by cangw -L?
14 * Kernel oops
15 ** Linux-2.6.33.7-00005-ge2f49b5 
16 Unable to handle kernel paging request for instruction fetch
17 Faulting instruction address: 0x8d56acc4
18 Oops: Kernel access of bad area, sig: 11 [#1]
19 PREEMPT Shark
20 Modules linked in:
21 NIP: 8d56acc4 LR: c0260208 CTR: 8d56acc7
22 REGS: c03f3bc0 TRAP: 0400   Not tainted  (2.6.33.7-00005-ge2f49b5)
23 MSR: 20009032 <EE,ME,IR,DR>  CR: 24008028  XER: 0000005f
24 TASK = c03d5478[0] 'swapper' THREAD: c03f2000
25 GPR00: 8d56acc7 c03f3c70 c03d5478 c7990160 c78d54ad 00000048 c79901a4 00000002 
26 GPR08: 000000e0 00000000 00000001 00000002 44000022 7f33ff4e 80000000 00000042 
27 GPR16: 00000008 20000000 c909e900 00000001 c03f3d78 c79326e0 00000008 c799dc1c 
28 GPR24: c7976800 c78d54f9 c7949828 c7a06620 c03f2000 c7990160 c78d54ad c03f3c70 
29 NIP [8d56acc4] 0x8d56acc4
30 LR [c0260208] dev_queue_xmit+0x2f0/0x588
31 Call Trace:
32 [c03f3c70] [c025ff74] dev_queue_xmit+0x5c/0x588 (unreliable)
33 [c03f3ca0] [c02d62d8] can_send+0x9c/0x1a0
34 [c03f3cc0] [c02d97a8] can_can_gw_rcv+0x108/0x164
35 [c03f3cf0] [c02d52ac] can_rcv_filter+0x240/0x2e8
36 [c03f3d10] [c02d541c] can_rcv+0xc8/0x140
37 [c03f3d30] [c025e968] netif_receive_skb+0x2ac/0x350
38 [c03f3d70] [c020a230] mscan_rx_poll+0x1c0/0x464
39 [c03f3dd0] [c025f428] net_rx_action+0x104/0x22c
40 [c03f3e20] [c00345e0] __do_softirq+0x10c/0x21c
41 [c03f3e70] [c00075d0] do_softirq+0x58/0x74
42 [c03f3e80] [c0034300] irq_exit+0x8c/0xbc
43 [c03f3e90] [c00076c8] do_IRQ+0xdc/0x188
44 [c03f3ec0] [c0014db0] ret_from_except+0x0/0x14
45 --- Exception: 501 at cpu_idle+0x104/0x114
46     LR = cpu_idle+0x104/0x114
47 [c03f3f80] [c000aefc] cpu_idle+0x68/0x114 (unreliable)
48 [c03f3fa0] [c0003f40] rest_init+0xa4/0xc4
49 [c03f3fc0] [c039f918] start_kernel+0x2e4/0x2fc
50 [c03f3ff0] [00003438] 0x3438
51 Instruction dump:
52 XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX 
53 XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX 
54 Kernel panic - not syncing: Fatal exception in interrupt
55 Call Trace:
56 [c03f3ab0] [c0009cdc] show_stack+0x70/0x1d4 (unreliable)
57 [c03f3b00] [c02ff5ec] dump_stack+0x2c/0x44
58 [c03f3b10] [c02ff6b0] panic+0xac/0x19c
59 [c03f3b60] [c0011df8] die+0x188/0x1a4
60 [c03f3b90] [c00165c8] bad_page_fault+0x90/0xe0
61 [c03f3bb0] [c0014bb8] handle_page_fault+0x7c/0x80
62 --- Exception: 400 at 0x8d56acc4
63     LR = dev_queue_xmit+0x2f0/0x588
64 [c03f3c70] [c025ff74] dev_queue_xmit+0x5c/0x588 (unreliable)
65 [c03f3ca0] [c02d62d8] can_send+0x9c/0x1a0
66 [c03f3cc0] [c02d97a8] can_can_gw_rcv+0x108/0x164
67 [c03f3cf0] [c02d52ac] can_rcv_filter+0x240/0x2e8
68 [c03f3d10] [c02d541c] can_rcv+0xc8/0x140
69 [c03f3d30] [c025e968] netif_receive_skb+0x2ac/0x350
70 [c03f3d70] [c020a230] mscan_rx_poll+0x1c0/0x464
71 [c03f3dd0] [c025f428] net_rx_action+0x104/0x22c
72 [c03f3e20] [c00345e0] __do_softirq+0x10c/0x21c
73 [c03f3e70] [c00075d0] do_softirq+0x58/0x74
74 [c03f3e80] [c0034300] irq_exit+0x8c/0xbc
75 [c03f3e90] [c00076c8] do_IRQ+0xdc/0x188
76 [c03f3ec0] [c0014db0] ret_from_except+0x0/0x14
77 --- Exception: 501 at cpu_idle+0x104/0x114
78     LR = cpu_idle+0x104/0x114
79 [c03f3f80] [c000aefc] cpu_idle+0x68/0x114 (unreliable)
80 [c03f3fa0] [c0003f40] rest_init+0xa4/0xc4
81 [c03f3fc0] [c039f918] start_kernel+0x2e4/0x2fc
82 [c03f3ff0] [00003438] 0x3438
83 Rebooting in 180 seconds..
84
85 * write returns ENOBUFS
86 The reason is the queuing discipline: the queue is full, so
87 pfifo_fast_enqueue returns a failure. This failure propagates back up
88 through qdisc_enqueue_root(), __dev_xmit_skb() and dev_queue_xmit() to
89 can_send().
90
91 ** See the following trace
92
93 # tracer: function_graph
94 #
95 # CPU  DURATION                  FUNCTION CALLS
96 # |     |   |                     |   |   |   |
97  1)               |  can_send() {
98  1)               |    dev_queue_xmit() {
99  1)               |      rt_spin_lock() {
100  1)   0.894 us    |        __might_sleep();
101  1)   2.750 us    |      }
102  1)               |      pfifo_fast_enqueue() {
103  1)               |        kfree_skb() {
104  1)               |          __kfree_skb() {
105  1)               |            skb_release_head_state() {
106  1)               |              sock_wfree() {
107  1)               |                sock_def_write_space() {
108  1)               |                  rt_read_lock() {
109  1)               |                    __rt_spin_lock() {
110  1)   0.677 us    |                      __might_sleep();
111  1)   2.386 us    |                    }
112  1)   4.150 us    |                  }
113  1)               |                  rt_read_unlock() {
114  1)   0.918 us    |                    __rt_spin_unlock();
115  1)   2.644 us    |                  }
116  1)   9.375 us    |                }
117  1) + 10.999 us   |              }
118  1) + 12.708 us   |            }
119  1)               |            skb_release_data() {
120  1)               |              kfree() {
121  1)               |                _slab_irq_disable() {
122  1)               |                  rt_spin_lock() {
123  1)   0.849 us    |                    __might_sleep();
124  1)   2.588 us    |                  }
125  1)   4.242 us    |                }
126  1)   0.864 us    |                __cache_free();
127  1)   0.888 us    |                rt_spin_unlock();
128  1)   9.456 us    |              }
129  1) + 11.185 us   |            }
130  1)               |            kmem_cache_free() {
131  1)               |              _slab_irq_disable() {
132  1)               |                rt_spin_lock() {
133  1)   0.812 us    |                  __might_sleep();
134  1)   2.542 us    |                }
135  1)   4.240 us    |              }
136  1)   0.813 us    |              __cache_free();
137  1)   0.894 us    |              rt_spin_unlock();
138  1)   9.329 us    |            }
139  1) + 36.444 us   |          }
140  1) + 38.136 us   |        }
141  1) + 39.974 us   |      }
142  1)               |      /* qdisc_enqueue_root = 1 */
143  1)               |      __qdisc_run() {
144  1)               |        sch_direct_xmit() {
145  1)   0.855 us    |          rt_spin_unlock();
146  1)               |          rt_spin_lock() {
147  1)   0.827 us    |            __might_sleep();
148  1)   2.521 us    |          }
149  1)               |          dev_hard_start_xmit() {
150  1)               |            sja1000_start_xmit() {
151  1)   1.646 us    |              kvaser_pci_write_reg();
152  1)   1.460 us    |              kvaser_pci_write_reg();
153  1)   1.691 us    |              kvaser_pci_write_reg();
154  1)   1.450 us    |              kvaser_pci_write_reg();
155  1)   1.416 us    |              kvaser_pci_write_reg();
156  1)               |              can_put_echo_skb() {
157  1)               |                sock_wfree() {
158  1)               |                  sock_def_write_space() {
159  1)               |                    rt_read_lock() {
160  1)               |                      __rt_spin_lock() {
161  1)   0.939 us    |                        __might_sleep();
162  1)   2.699 us    |                      }
163  1)   4.337 us    |                    }
164  1)               |                    rt_read_unlock() {
165  1)   0.844 us    |                      __rt_spin_unlock();
166  1)   2.507 us    |                    }
167  1)   9.242 us    |                  }
168  1) + 11.047 us   |                }
169  1) + 12.791 us   |              }
170  1)               |              sja1000_write_cmdreg() {
171  1)               |                rt_spin_lock() {
172  1)   0.802 us    |                  __might_sleep();
173  1)   2.532 us    |                }
174  1)   1.481 us    |                kvaser_pci_write_reg();
175  1)   2.155 us    |                kvaser_pci_read_reg();
176  1)   0.843 us    |                rt_spin_unlock();
177  1) + 11.100 us   |              }
178  1) + 38.263 us   |            }
179  1)               |            /* ops->ndo_start_xmit(skb, dev) = 0 */
180  1) + 41.508 us   |          }
181  1)               |          /* dev_hard_start_xmit(skb, dev) = 0 */
182  1)   0.884 us    |          rt_spin_unlock();
183  1)               |          rt_spin_lock() {
184  1)   0.817 us    |            __might_sleep();
185  1)   2.583 us    |          }
186  1)               |          /* qdisc_len = 9 */
187  1)               |          /* sch_direct_xmit returns 0 */
188  1) + 57.502 us   |        }
189  1) + 59.288 us   |      }
190  1)   0.877 us    |      rt_spin_unlock();
191  1)               |      /* __dev_xmit_skb = 1 */
192  1) ! 111.265 us  |    }
193  1)               |  /* dev_queue_xmit = 1 */
194  1)               |  /* net_xmit_errno = -105 */
195  1)   0.956 us    |    kfree_skb();
196  1) ! 117.340 us  |  }
197  1)               |  /* write returned -1 */
198
199 ** The trace was generated by kernel v2.6.33.7-rt29-4-g92487e4 with the following patch applied:
200
201 diff --git a/net/can/af_can.c b/net/can/af_can.c
202 index 4d8479d7..c91968d 100644
203 --- a/net/can/af_can.c
204 +++ b/net/can/af_can.c
205 @@ -344,8 +344,11 @@ int can_send(struct sk_buff *skb, int loop)
206  
207         /* send to netdevice */
208         err = dev_queue_xmit(skb);
209 -       if (err > 0)
210 +       trace_printk("dev_queue_xmit = %d\n", err);
211 +       if (err > 0) {
212                 err = net_xmit_errno(err);
213 +               trace_printk("net_xmit_errno = %d\n", err);
214 +       }
215  
216         if (err) {
217  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
218 diff --git a/net/core/dev.c b/net/core/dev.c
219 index 7a5412e..10ca9b4 100644
220 --- a/net/core/dev.c
221 +++ b/net/core/dev.c
222 @@ -1828,6 +1828,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
223                         skb_dst_drop(skb);
224  
225                 rc = ops->ndo_start_xmit(skb, dev);
226 +               trace_printk("ops->ndo_start_xmit(skb, dev) = %d\n", rc);
227 +
228                 if (rc == NETDEV_TX_OK)
229                         txq_trans_update(txq);
230                 /*
231 @@ -1854,6 +1856,7 @@ gso:
232                 skb->next = nskb->next;
233                 nskb->next = NULL;
234                 rc = ops->ndo_start_xmit(nskb, dev);
235 +               trace_printk("gso: ops->ndo_start_xmit(skb, dev) = %d\n", rc);
236                 if (unlikely(rc != NETDEV_TX_OK)) {
237                         if (rc & ~NETDEV_TX_MASK)
238                                 goto out_kfree_gso_skb;
239 @@ -1949,6 +1952,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
240         spin_lock(root_lock);
241         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
242                 kfree_skb(skb);
243 +               trace_printk("QDISC_STATE_DEACTIVATED\n");
244 +
245                 rc = NET_XMIT_DROP;
246         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
247                    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
248 @@ -1966,6 +1971,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
249                 rc = NET_XMIT_SUCCESS;
250         } else {
251                 rc = qdisc_enqueue_root(skb, q);
252 +               trace_printk("qdisc_enqueue_root = %d\n", rc);
253                 qdisc_run(q);
254         }
255         spin_unlock(root_lock);
256 @@ -2047,6 +2053,7 @@ gso:
257  #endif
258         if (q->enqueue) {
259                 rc = __dev_xmit_skb(skb, q, dev, txq);
260 +               trace_printk("__dev_xmit_skb = %d\n", rc);
261                 goto out;
262         }
263  
264 @@ -2072,6 +2079,8 @@ gso:
265  
266                         if (!netif_tx_queue_stopped(txq)) {
267                                 rc = dev_hard_start_xmit(skb, dev, txq);
268 +                               trace_printk("dev_hard_start_xmit = %d\n", rc);
269 +
270                                 if (dev_xmit_complete(rc)) {
271                                         HARD_TX_UNLOCK(dev, txq);
272                                         goto out;
273 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
274 index bf7f50e..aea0833 100644
275 --- a/net/sched/sch_generic.c
276 +++ b/net/sched/sch_generic.c
277 @@ -125,6 +125,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
278         HARD_TX_LOCK(dev, txq);
279         if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
280                 ret = dev_hard_start_xmit(skb, dev, txq);
281 +       trace_printk("dev_hard_start_xmit(skb, dev) = %d\n", ret);
282  
283         HARD_TX_UNLOCK(dev, txq);
284  
285 @@ -133,9 +134,11 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
286         if (dev_xmit_complete(ret)) {
287                 /* Driver sent out skb successfully or skb was consumed */
288                 ret = qdisc_qlen(q);
289 +               trace_printk("qdisc_len = %d\n", ret);
290         } else if (ret == NETDEV_TX_LOCKED) {
291                 /* Driver try lock failed */
292                 ret = handle_dev_cpu_collision(skb, txq, q);
293 +               trace_printk("handle_dev_cpu_collision = %d\n", ret);
294         } else {
295                 /* Driver returned NETDEV_TX_BUSY - requeue skb */
296                 if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
297 @@ -143,12 +146,13 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
298                                dev->name, ret, q->q.qlen);
299  
300                 ret = dev_requeue_skb(skb, q);
301 +               trace_printk("dev_requeue_skb = %d\n", ret);
302         }
303  
304         if (ret && (netif_tx_queue_stopped(txq) ||
305                     netif_tx_queue_frozen(txq)))
306                 ret = 0;
307 -
308 +       trace_printk("%s returns %d\n", __FUNCTION__, ret);
309         return ret;
310  }
311