1 From 24585ffe4ab2aba240cdd17c04143fb9ef375e4e Mon Sep 17 00:00:00 2001
2 From: Ingo Molnar <mingo@elte.hu>
3 Date: Tue, 29 Nov 2011 20:18:22 -0500
4 Subject: [PATCH 108/365] tasklet: Prevent tasklets from going into infinite
5  spin in RT
6
7 When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
8 and spinlocks turn into mutexes. But this can cause issues with
9 tasks disabling tasklets. A tasklet runs under ksoftirqd, and
10 if a tasklet is disabled with tasklet_disable(), the tasklet
11 count is increased. When a tasklet runs, it checks this counter
12 and if it is set, it adds itself back on the softirq queue and
13 returns.
14
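As an illustration of how that count gets raised in the first place, here is a
minimal sketch of the usual driver-side pattern (the tasklet, function and
helper names are hypothetical, not taken from this patch):

        #include <linux/interrupt.h>

        static void my_tasklet_fn(unsigned long data);          /* hypothetical */
        static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);   /* count starts at 0 */

        static void my_dev_reconfigure(void)                    /* hypothetical helper */
        {
                /* Raises t->count and waits for a running instance to finish. */
                tasklet_disable(&my_tasklet);
                /* ... update state that my_tasklet_fn() also uses ... */
                /* Drops t->count again; the tasklet may run once more. */
                tasklet_enable(&my_tasklet);
        }

While the count is raised, a tasklet that fires only requeues itself, and that
requeue is what ksoftirqd ends up spinning on, as described next.
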
15 The problem arises in RT because ksoftirqd will see that a softirq
16 is ready to run (the tasklet softirq just re-armed itself), and will
17 not sleep, but instead run the softirqs again. The tasklet softirq
18 will still see that the count is non-zero, will not execute the
19 tasklet, and will requeue it on the softirq again, which will
20 cause ksoftirqd to run it again and again and again.
21
22 It gets worse because ksoftirqd runs as a real-time thread.
23 If it preempted the task that disabled tasklets, and that task
24 has migration disabled, or can't run for other reasons, the tasklet
25 softirq will never run because the count will never be zero, and
26 ksoftirqd will go into an infinite loop. Since ksoftirqd is an RT
27 task, this becomes a big problem.
28
29 This is a hack of a solution: have tasklet_disable() stop tasklets, and
30 when a disabled tasklet runs, instead of requeueing it on the softirq,
31 delay it. When tasklet_enable() is called and tasklets are
32 waiting, tasklet_enable() will kick those tasklets to continue.
33 This prevents the lockup caused by ksoftirqd going into an infinite loop.
34
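Concretely, the enable path added in kernel/softirq.c below boils down to the
following (reproduced here for readability; same logic as in the hunk):

        void tasklet_enable(struct tasklet_struct *t)
        {
                if (!atomic_dec_and_test(&t->count))
                        return;
                /* A disabled tasklet that tried to run was parked with the
                 * new PENDING bit instead of being requeued; kick it now. */
                if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
                        tasklet_schedule(t);
        }
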
35 [ rostedt@goodmis.org: ported to 3.0-rt ]
36
37 Signed-off-by: Ingo Molnar <mingo@elte.hu>
38 Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
39 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
40 ---
41  include/linux/interrupt.h |  33 ++++----
42  kernel/softirq.c          | 201 +++++++++++++++++++++++++++++++++-------------
43  2 files changed, 162 insertions(+), 72 deletions(-)
44
45 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
46 index 248380d..8564dba 100644
47 --- a/include/linux/interrupt.h
48 +++ b/include/linux/interrupt.h
49 @@ -482,8 +482,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
50       to be executed on some cpu at least once after this.
51     * If the tasklet is already scheduled, but its execution is still not
52       started, it will be executed only once.
53 -   * If this tasklet is already running on another CPU (or schedule is called
54 -     from tasklet itself), it is rescheduled for later.
55 +   * If this tasklet is already running on another CPU, it is rescheduled
56 +     for later.
57 +   * Schedule must not be called from the tasklet itself (a lockup occurs)
58     * Tasklet is strictly serialized wrt itself, but not
59       wrt another tasklets. If client needs some intertask synchronization,
60       he makes it with spinlocks.
61 @@ -508,27 +509,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
62  enum
63  {
64         TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
65 -       TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
66 +       TASKLET_STATE_RUN,      /* Tasklet is running (SMP only) */
67 +       TASKLET_STATE_PENDING   /* Tasklet is pending */
68  };
69  
70 -#ifdef CONFIG_SMP
71 +#define TASKLET_STATEF_SCHED   (1 << TASKLET_STATE_SCHED)
72 +#define TASKLET_STATEF_RUN     (1 << TASKLET_STATE_RUN)
73 +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
74 +
75 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
76  static inline int tasklet_trylock(struct tasklet_struct *t)
77  {
78         return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
79  }
80  
81 +static inline int tasklet_tryunlock(struct tasklet_struct *t)
82 +{
83 +       return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
84 +}
85 +
86  static inline void tasklet_unlock(struct tasklet_struct *t)
87  {
88         smp_mb__before_atomic();
89         clear_bit(TASKLET_STATE_RUN, &(t)->state);
90  }
91  
92 -static inline void tasklet_unlock_wait(struct tasklet_struct *t)
93 -{
94 -       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
95 -}
96 +extern void tasklet_unlock_wait(struct tasklet_struct *t);
97 +
98  #else
99  #define tasklet_trylock(t) 1
100 +#define tasklet_tryunlock(t)   1
101  #define tasklet_unlock_wait(t) do { } while (0)
102  #define tasklet_unlock(t) do { } while (0)
103  #endif
104 @@ -577,12 +587,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
105         smp_mb();
106  }
107  
108 -static inline void tasklet_enable(struct tasklet_struct *t)
109 -{
110 -       smp_mb__before_atomic();
111 -       atomic_dec(&t->count);
112 -}
113 -
114 +extern void tasklet_enable(struct tasklet_struct *t);
115  extern void tasklet_kill(struct tasklet_struct *t);
116  extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
117  extern void tasklet_init(struct tasklet_struct *t,
118 diff --git a/kernel/softirq.c b/kernel/softirq.c
119 index 479e443..b7e9846 100644
120 --- a/kernel/softirq.c
121 +++ b/kernel/softirq.c
122 @@ -21,6 +21,7 @@
123  #include <linux/freezer.h>
124  #include <linux/kthread.h>
125  #include <linux/rcupdate.h>
126 +#include <linux/delay.h>
127  #include <linux/ftrace.h>
128  #include <linux/smp.h>
129  #include <linux/smpboot.h>
130 @@ -446,15 +447,45 @@ struct tasklet_head {
131  static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
132  static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
133  
134 +static void inline
135 +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
136 +{
137 +       if (tasklet_trylock(t)) {
138 +again:
139 +               /* We may have been preempted before tasklet_trylock
140 +                * and __tasklet_action may have already run.
141 +                * So double check the sched bit while the tasklet
142 +                * is locked before adding it to the list.
143 +                */
144 +               if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
145 +                       t->next = NULL;
146 +                       *head->tail = t;
147 +                       head->tail = &(t->next);
148 +                       raise_softirq_irqoff(nr);
149 +                       tasklet_unlock(t);
150 +               } else {
151 +                       /* This is subtle. If we hit the corner case above,
152 +                        * it is possible that we get preempted right here,
153 +                        * and another task has successfully called
154 +                        * tasklet_schedule(), then this function, and
155 +                        * failed on the trylock. Thus we must be sure
156 +                        * before releasing the tasklet lock, that the
157 +                        * SCHED_BIT is clear. Otherwise the tasklet
158 +                        * may get its SCHED_BIT set, but not added to the
159 +                        * list
160 +                        */
161 +                       if (!tasklet_tryunlock(t))
162 +                               goto again;
163 +               }
164 +       }
165 +}
166 +
167  void __tasklet_schedule(struct tasklet_struct *t)
168  {
169         unsigned long flags;
170  
171         local_irq_save(flags);
172 -       t->next = NULL;
173 -       *__this_cpu_read(tasklet_vec.tail) = t;
174 -       __this_cpu_write(tasklet_vec.tail, &(t->next));
175 -       raise_softirq_irqoff(TASKLET_SOFTIRQ);
176 +       __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
177         local_irq_restore(flags);
178  }
179  EXPORT_SYMBOL(__tasklet_schedule);
180 @@ -464,10 +495,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
181         unsigned long flags;
182  
183         local_irq_save(flags);
184 -       t->next = NULL;
185 -       *__this_cpu_read(tasklet_hi_vec.tail) = t;
186 -       __this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
187 -       raise_softirq_irqoff(HI_SOFTIRQ);
188 +       __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
189         local_irq_restore(flags);
190  }
191  EXPORT_SYMBOL(__tasklet_hi_schedule);
192 @@ -476,82 +504,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
193  {
194         BUG_ON(!irqs_disabled());
195  
196 -       t->next = __this_cpu_read(tasklet_hi_vec.head);
197 -       __this_cpu_write(tasklet_hi_vec.head, t);
198 -       __raise_softirq_irqoff(HI_SOFTIRQ);
199 +       __tasklet_hi_schedule(t);
200  }
201  EXPORT_SYMBOL(__tasklet_hi_schedule_first);
202  
203 -static void tasklet_action(struct softirq_action *a)
204 +void  tasklet_enable(struct tasklet_struct *t)
205  {
206 -       struct tasklet_struct *list;
207 +       if (!atomic_dec_and_test(&t->count))
208 +               return;
209 +       if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
210 +               tasklet_schedule(t);
211 +}
212 +EXPORT_SYMBOL(tasklet_enable);
213  
214 -       local_irq_disable();
215 -       list = __this_cpu_read(tasklet_vec.head);
216 -       __this_cpu_write(tasklet_vec.head, NULL);
217 -       __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
218 -       local_irq_enable();
219 +static void __tasklet_action(struct softirq_action *a,
220 +                            struct tasklet_struct *list)
221 +{
222 +       int loops = 1000000;
223  
224         while (list) {
225                 struct tasklet_struct *t = list;
226  
227                 list = list->next;
228  
229 -               if (tasklet_trylock(t)) {
230 -                       if (!atomic_read(&t->count)) {
231 -                               if (!test_and_clear_bit(TASKLET_STATE_SCHED,
232 -                                                       &t->state))
233 -                                       BUG();
234 -                               t->func(t->data);
235 -                               tasklet_unlock(t);
236 -                               continue;
237 -                       }
238 -                       tasklet_unlock(t);
239 +               /*
240 +                * Should always succeed - after a tasklet got on the
241 +                * list (after getting the SCHED bit set from 0 to 1),
242 +                * nothing but the tasklet softirq it got queued to can
243 +                * lock it:
244 +                */
245 +               if (!tasklet_trylock(t)) {
246 +                       WARN_ON(1);
247 +                       continue;
248                 }
249  
250 -               local_irq_disable();
251                 t->next = NULL;
252 -               *__this_cpu_read(tasklet_vec.tail) = t;
253 -               __this_cpu_write(tasklet_vec.tail, &(t->next));
254 -               __raise_softirq_irqoff(TASKLET_SOFTIRQ);
255 -               local_irq_enable();
256 +
257 +               /*
258 +                * If we cannot handle the tasklet because it's disabled,
259 +                * mark it as pending. tasklet_enable() will later
260 +                * re-schedule the tasklet.
261 +                */
262 +               if (unlikely(atomic_read(&t->count))) {
263 +out_disabled:
264 +                       /* implicit unlock: */
265 +                       wmb();
266 +                       t->state = TASKLET_STATEF_PENDING;
267 +                       continue;
268 +               }
269 +
270 +               /*
271 +                * After this point on the tasklet might be rescheduled
272 +                * on another CPU, but it can only be added to another
273 +                * CPU's tasklet list if we unlock the tasklet (which we
274 +                * dont do yet).
275 +                */
276 +               if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
277 +                       WARN_ON(1);
278 +
279 +again:
280 +               t->func(t->data);
281 +
282 +               /*
283 +                * Try to unlock the tasklet. We must use cmpxchg, because
284 +                * another CPU might have scheduled or disabled the tasklet.
285 +                * We only allow the STATE_RUN -> 0 transition here.
286 +                */
287 +               while (!tasklet_tryunlock(t)) {
288 +                       /*
289 +                        * If it got disabled meanwhile, bail out:
290 +                        */
291 +                       if (atomic_read(&t->count))
292 +                               goto out_disabled;
293 +                       /*
294 +                        * If it got scheduled meanwhile, re-execute
295 +                        * the tasklet function:
296 +                        */
297 +                       if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
298 +                               goto again;
299 +                       if (!--loops) {
300 +                               printk("hm, tasklet state: %08lx\n", t->state);
301 +                               WARN_ON(1);
302 +                               tasklet_unlock(t);
303 +                               break;
304 +                       }
305 +               }
306         }
307  }
308  
309 +static void tasklet_action(struct softirq_action *a)
310 +{
311 +       struct tasklet_struct *list;
312 +
313 +       local_irq_disable();
314 +
315 +       list = __this_cpu_read(tasklet_vec.head);
316 +       __this_cpu_write(tasklet_vec.head, NULL);
317 +       __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
318 +
319 +       local_irq_enable();
320 +
321 +       __tasklet_action(a, list);
322 +}
323 +
324  static void tasklet_hi_action(struct softirq_action *a)
325  {
326         struct tasklet_struct *list;
327  
328         local_irq_disable();
329 +
330         list = __this_cpu_read(tasklet_hi_vec.head);
331         __this_cpu_write(tasklet_hi_vec.head, NULL);
332         __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
333 -       local_irq_enable();
334 -
335 -       while (list) {
336 -               struct tasklet_struct *t = list;
337  
338 -               list = list->next;
339 -
340 -               if (tasklet_trylock(t)) {
341 -                       if (!atomic_read(&t->count)) {
342 -                               if (!test_and_clear_bit(TASKLET_STATE_SCHED,
343 -                                                       &t->state))
344 -                                       BUG();
345 -                               t->func(t->data);
346 -                               tasklet_unlock(t);
347 -                               continue;
348 -                       }
349 -                       tasklet_unlock(t);
350 -               }
351 +       local_irq_enable();
352  
353 -               local_irq_disable();
354 -               t->next = NULL;
355 -               *__this_cpu_read(tasklet_hi_vec.tail) = t;
356 -               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
357 -               __raise_softirq_irqoff(HI_SOFTIRQ);
358 -               local_irq_enable();
359 -       }
360 +       __tasklet_action(a, list);
361  }
362  
363  void tasklet_init(struct tasklet_struct *t,
364 @@ -572,7 +640,7 @@ void tasklet_kill(struct tasklet_struct *t)
365  
366         while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
367                 do {
368 -                       yield();
369 +                       msleep(1);
370                 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
371         }
372         tasklet_unlock_wait(t);
373 @@ -646,6 +714,23 @@ void __init softirq_init(void)
374         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
375  }
376  
377 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
378 +void tasklet_unlock_wait(struct tasklet_struct *t)
379 +{
380 +       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
381 +               /*
382 +                * Hack for now to avoid this busy-loop:
383 +                */
384 +#ifdef CONFIG_PREEMPT_RT_FULL
385 +               msleep(1);
386 +#else
387 +               barrier();
388 +#endif
389 +       }
390 +}
391 +EXPORT_SYMBOL(tasklet_unlock_wait);
392 +#endif
393 +
394  static int ksoftirqd_should_run(unsigned int cpu)
395  {
396         return local_softirq_pending();
397 -- 
398 2.7.4
399