From 24585ffe4ab2aba240cdd17c04143fb9ef375e4e Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Tue, 29 Nov 2011 20:18:22 -0500
Subject: [PATCH 108/365] tasklet: Prevent tasklets from going into infinite
 spin in RT

When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
and spinlocks turn into mutexes. But this can cause issues with
tasks disabling tasklets. A tasklet runs under ksoftirqd, and
if a tasklet is disabled with tasklet_disable(), the tasklet
count is increased. When a tasklet runs, it checks this counter
and if it is set, it adds itself back on the softirq queue and
returns.
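
That requeue path looks roughly like the sketch below, condensed from
the list-walk body of tasklet_action() that this patch removes;
requeue_tasklet() is a made-up stand-in for the open-coded list append:

        if (tasklet_trylock(t)) {
                if (!atomic_read(&t->count)) {
                        /* Enabled: clear SCHED and run the handler. */
                        if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                BUG();
                        t->func(t->data);
                        tasklet_unlock(t);
                        continue;
                }
                tasklet_unlock(t);
        }
        /* Disabled (count != 0): put it back and raise the softirq again. */
        requeue_tasklet(t);
        __raise_softirq_irqoff(TASKLET_SOFTIRQ);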

The problem arises in RT because ksoftirqd will see that a softirq
is ready to run (the tasklet softirq just re-armed itself), and will
not sleep, but instead run the softirqs again. The tasklet softirq
will still see that the count is non-zero, will not execute the
tasklet, and will requeue itself on the softirq again, which will
cause ksoftirqd to run it again and again and again.
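
In effect, ksoftirqd degenerates into the following busy loop
(illustrative pseudo-C, not the literal run_ksoftirqd() source):

        while (local_softirq_pending()) {
                /* Runs tasklet_action(), which finds the disabled
                 * tasklet, requeues it and re-raises TASKLET_SOFTIRQ,
                 * so the pending mask never goes empty. */
                __do_softirq();
        }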

It gets worse because ksoftirqd runs as a real-time thread.
If it preempted the task that disabled tasklets, and that task
has migration disabled, or can't run for other reasons, the tasklet
softirq will never run because the count will never be zero, and
ksoftirqd will go into an infinite loop. As an RT task, this
becomes a big problem.

This is a hack solution to have tasklet_disable stop tasklets, and
when a tasklet runs, instead of requeueing the tasklet on the softirq,
it delays it. When tasklet_enable() is called, and tasklets are
waiting, then tasklet_enable() will kick the tasklets to continue.
This prevents the lockup caused by ksoftirqd going into an infinite loop.
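
Condensed from the diff below: a disabled tasklet is parked by setting
a new PENDING state bit instead of being requeued,

        /* In __tasklet_action(), with the tasklet RUN lock held: */
        if (unlikely(atomic_read(&t->count))) {
                t->state = TASKLET_STATEF_PENDING;      /* implicit unlock */
                continue;
        }

and tasklet_enable() reschedules a parked tasklet once the disable
count drops back to zero:

        void tasklet_enable(struct tasklet_struct *t)
        {
                if (!atomic_dec_and_test(&t->count))
                        return;
                if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
                        tasklet_schedule(t);
        }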

[ rostedt@goodmis.org: ported to 3.0-rt ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/interrupt.h |  33 ++++----
 kernel/softirq.c          | 201 +++++++++++++++++++++++++++++++++-------------
 2 files changed, 162 insertions(+), 72 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 248380d..8564dba 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -482,8 +482,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
      to be executed on some cpu at least once after this.
    * If the tasklet is already scheduled, but its execution is still not
      started, it will be executed only once.
-   * If this tasklet is already running on another CPU (or schedule is called
-     from tasklet itself), it is rescheduled for later.
+   * If this tasklet is already running on another CPU, it is rescheduled
+     for later.
+   * Schedule must not be called from the tasklet itself (a lockup occurs)
    * Tasklet is strictly serialized wrt itself, but not
      wrt another tasklets. If client needs some intertask synchronization,
      he makes it with spinlocks.
@@ -508,27 +509,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
 enum
 {
         TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
-        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
+        TASKLET_STATE_RUN,      /* Tasklet is running (SMP only) */
+        TASKLET_STATE_PENDING   /* Tasklet is pending */
 };
 
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED    (1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN      (1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING  (1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
         return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+        return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
         smp_mb__before_atomic();
         clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
 #else
 #define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t) 1
 #define tasklet_unlock_wait(t) do { } while (0)
 #define tasklet_unlock(t) do { } while (0)
 #endif
@@ -577,12 +587,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
         smp_mb();
 }
 
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
-        smp_mb__before_atomic();
-        atomic_dec(&t->count);
-}
-
+extern void tasklet_enable(struct tasklet_struct *t);
 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e443..b7e9846 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -21,6 +21,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/delay.h>
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/smpboot.h>
@@ -446,15 +447,45 @@ struct tasklet_head {
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+static inline void
+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
+{
+        if (tasklet_trylock(t)) {
+again:
+                /* We may have been preempted before tasklet_trylock
+                 * and __tasklet_action may have already run.
+                 * So double check the sched bit while the tasklet
+                 * is locked before adding it to the list.
+                 */
+                if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
+                        t->next = NULL;
+                        *head->tail = t;
+                        head->tail = &(t->next);
+                        raise_softirq_irqoff(nr);
+                        tasklet_unlock(t);
+                } else {
+                        /* This is subtle. If we hit the corner case above,
+                         * it is possible that we get preempted right here,
+                         * and another task has successfully called
+                         * tasklet_schedule(), then this function, and
+                         * failed on the trylock. Thus we must be sure
+                         * before releasing the tasklet lock, that the
+                         * SCHED_BIT is clear. Otherwise the tasklet
+                         * may get its SCHED_BIT set, but not added to the
+                         * list.
+                         */
+                        if (!tasklet_tryunlock(t))
+                                goto again;
+                }
+        }
+}
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
         unsigned long flags;
 
         local_irq_save(flags);
-        t->next = NULL;
-        *__this_cpu_read(tasklet_vec.tail) = t;
-        __this_cpu_write(tasklet_vec.tail, &(t->next));
-        raise_softirq_irqoff(TASKLET_SOFTIRQ);
+        __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__tasklet_schedule);
@@ -464,10 +495,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
         unsigned long flags;
 
         local_irq_save(flags);
-        t->next = NULL;
-        *__this_cpu_read(tasklet_hi_vec.tail) = t;
-        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-        raise_softirq_irqoff(HI_SOFTIRQ);
+        __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule);
@@ -476,82 +504,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
         BUG_ON(!irqs_disabled());
 
-        t->next = __this_cpu_read(tasklet_hi_vec.head);
-        __this_cpu_write(tasklet_hi_vec.head, t);
-        __raise_softirq_irqoff(HI_SOFTIRQ);
+        __tasklet_hi_schedule(t);
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+void tasklet_enable(struct tasklet_struct *t)
 {
-        struct tasklet_struct *list;
+        if (!atomic_dec_and_test(&t->count))
+                return;
+        if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
+                tasklet_schedule(t);
+}
+EXPORT_SYMBOL(tasklet_enable);
 
-        local_irq_disable();
-        list = __this_cpu_read(tasklet_vec.head);
-        __this_cpu_write(tasklet_vec.head, NULL);
-        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
-        local_irq_enable();
+static void __tasklet_action(struct softirq_action *a,
+                             struct tasklet_struct *list)
+{
+        int loops = 1000000;
 
         while (list) {
                 struct tasklet_struct *t = list;
 
                 list = list->next;
 
-                if (tasklet_trylock(t)) {
-                        if (!atomic_read(&t->count)) {
-                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-                                                        &t->state))
-                                        BUG();
-                                t->func(t->data);
-                                tasklet_unlock(t);
-                                continue;
-                        }
-                        tasklet_unlock(t);
-                }
+                /*
+                 * Should always succeed - after a tasklet got on the
+                 * list (after getting the SCHED bit set from 0 to 1),
+                 * nothing but the tasklet softirq it got queued to can
+                 * lock it:
+                 */
+                if (!tasklet_trylock(t)) {
+                        WARN_ON(1);
+                        continue;
+                }
 
-                local_irq_disable();
                 t->next = NULL;
-                *__this_cpu_read(tasklet_vec.tail) = t;
-                __this_cpu_write(tasklet_vec.tail, &(t->next));
-                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
-                local_irq_enable();
+
+                /*
+                 * If we cannot handle the tasklet because it's disabled,
+                 * mark it as pending. tasklet_enable() will later
+                 * re-schedule the tasklet.
+                 */
+                if (unlikely(atomic_read(&t->count))) {
+out_disabled:
+                        /* implicit unlock: */
+                        wmb();
+                        t->state = TASKLET_STATEF_PENDING;
+                        continue;
+                }
+
+                /*
+                 * After this point on the tasklet might be rescheduled
+                 * on another CPU, but it can only be added to another
+                 * CPU's tasklet list if we unlock the tasklet (which we
+                 * don't do yet).
+                 */
+                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                        WARN_ON(1);
+
+again:
+                t->func(t->data);
+
+                /*
+                 * Try to unlock the tasklet. We must use cmpxchg, because
+                 * another CPU might have scheduled or disabled the tasklet.
+                 * We only allow the STATE_RUN -> 0 transition here.
+                 */
+                while (!tasklet_tryunlock(t)) {
+                        /*
+                         * If it got disabled meanwhile, bail out:
+                         */
+                        if (atomic_read(&t->count))
+                                goto out_disabled;
+                        /*
+                         * If it got scheduled meanwhile, re-execute
+                         * the tasklet function:
+                         */
+                        if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+                                goto again;
+                        if (!--loops) {
+                                printk("hm, tasklet state: %08lx\n", t->state);
+                                WARN_ON(1);
+                                tasklet_unlock(t);
+                                break;
+                        }
+                }
         }
 }
 
+static void tasklet_action(struct softirq_action *a)
+{
+        struct tasklet_struct *list;
+
+        local_irq_disable();
+
+        list = __this_cpu_read(tasklet_vec.head);
+        __this_cpu_write(tasklet_vec.head, NULL);
+        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
+
+        local_irq_enable();
+
+        __tasklet_action(a, list);
+}
+
 static void tasklet_hi_action(struct softirq_action *a)
 {
         struct tasklet_struct *list;
 
         local_irq_disable();
+
         list = __this_cpu_read(tasklet_hi_vec.head);
         __this_cpu_write(tasklet_hi_vec.head, NULL);
         __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
-        local_irq_enable();
 
-        while (list) {
-                struct tasklet_struct *t = list;
-
-                list = list->next;
-
-                if (tasklet_trylock(t)) {
-                        if (!atomic_read(&t->count)) {
-                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-                                                        &t->state))
-                                        BUG();
-                                t->func(t->data);
-                                tasklet_unlock(t);
-                                continue;
-                        }
-                        tasklet_unlock(t);
-                }
+        local_irq_enable();
 
-                local_irq_disable();
-                t->next = NULL;
-                *__this_cpu_read(tasklet_hi_vec.tail) = t;
-                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-                __raise_softirq_irqoff(HI_SOFTIRQ);
-                local_irq_enable();
-        }
+        __tasklet_action(a, list);
 }
 
 void tasklet_init(struct tasklet_struct *t,
@@ -572,7 +640,7 @@ void tasklet_kill(struct tasklet_struct *t)
 
         while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                 do {
-                        yield();
+                        msleep(1);
                 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
         }
         tasklet_unlock_wait(t);
@@ -646,6 +714,23 @@ void __init softirq_init(void)
         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+                /*
+                 * Hack for now to avoid this busy-loop:
+                 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+                msleep(1);
+#else
+                barrier();
+#endif
+        }
+}
+EXPORT_SYMBOL(tasklet_unlock_wait);
+#endif
+
 static int ksoftirqd_should_run(unsigned int cpu)
 {
         return local_softirq_pending();