/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/locallock.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better locality
     or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};
#ifdef CONFIG_NO_HZ_COMMON
# ifdef CONFIG_PREEMPT_RT_FULL

struct softirq_runner {
	struct task_struct *runner[NR_SOFTIRQS];
};

static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);

static inline void softirq_set_runner(unsigned int sirq)
{
	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);

	sr->runner[sirq] = current;
}

static inline void softirq_clr_runner(unsigned int sirq)
{
	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);

	sr->runner[sirq] = NULL;
}
/*
 * On preempt-rt a softirq running context might be blocked on a
 * lock. There might be no other runnable task on this CPU because the
 * lock owner runs on some other CPU. So we have to go into idle with
 * the pending bit set. Therefore we need to check this otherwise we
 * warn about false positives which confuses users and defeats the
 * whole purpose of this test.
 *
 * This code is called with interrupts disabled.
 */
void softirq_check_pending_idle(void)
{
	static int rate_limit;
	struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
	u32 warnpending;
	int i;

	if (rate_limit >= 10)
		return;

	warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
	for (i = 0; i < NR_SOFTIRQS; i++) {
		struct task_struct *tsk = sr->runner[i];

		/*
		 * The wakeup code in rtmutex.c wakes up the task
		 * _before_ it sets pi_blocked_on to NULL under
		 * tsk->pi_lock. So we need to check for both: state
		 * and pi_blocked_on.
		 */
		if (!tsk)
			continue;
		raw_spin_lock(&tsk->pi_lock);
		if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
			/* Clear all bits pending in that task */
			warnpending &= ~(tsk->softirqs_raised);
			warnpending &= ~(1 << i);
		}
		raw_spin_unlock(&tsk->pi_lock);
	}

	if (warnpending) {
		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
		       warnpending);
		rate_limit++;
	}
}
# else /* !CONFIG_PREEMPT_RT_FULL */

/*
 * On !PREEMPT_RT we just printk rate limited:
 */
void softirq_check_pending_idle(void)
{
	static int rate_limit;

	if (rate_limit < 10 &&
	    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
		printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
		       local_softirq_pending());
		rate_limit++;
	}
}
# endif /* !CONFIG_PREEMPT_RT_FULL */

#else /* !CONFIG_NO_HZ_COMMON */
static inline void softirq_set_runner(unsigned int sirq) { }
static inline void softirq_clr_runner(unsigned int sirq) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
static void handle_softirq(unsigned int vec_nr)
{
	struct softirq_action *h = softirq_vec + vec_nr;
	int prev_count;

	prev_count = preempt_count();

	kstat_incr_softirqs_this_cpu(vec_nr);

	trace_softirq_entry(vec_nr);
	h->action(h);
	trace_softirq_exit(vec_nr);
	if (unlikely(prev_count != preempt_count())) {
		pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
		       vec_nr, softirq_to_name[vec_nr], h->action,
		       prev_count, preempt_count());
		preempt_count_set(prev_count);
	}
}
#ifndef CONFIG_PREEMPT_RT_FULL
static inline int ksoftirqd_softirq_pending(void)
{
	return local_softirq_pending();
}

static void handle_pending_softirqs(u32 pending, int need_rcu_bh_qs)
{
	struct softirq_action *h = softirq_vec;
	int softirq_bit;

	local_irq_enable();

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;

		h += softirq_bit - 1;
		vec_nr = h - softirq_vec;
		handle_softirq(vec_nr);

		h++;
		pending >>= softirq_bit;
	}

	if (need_rcu_bh_qs)
		rcu_bh_qs();
	local_irq_disable();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (ksoftirqd_softirq_pending()) {
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}
/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
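/*
 * Illustrative sketch (not used by this file): with the accounting above,
 * the generic helpers built on softirq_count() can tell the two states
 * apart, e.g. on !PREEMPT_RT_FULL:
 *
 *	in_serving_softirq()	- softirq_count() & SOFTIRQ_OFFSET, true only
 *				  while softirq processing is in progress
 *	in_softirq()		- softirq_count(), also true when bh is merely
 *				  disabled via local_bh_disable()
 */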
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
#endif
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
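/*
 * Worked example (illustration only): msecs_to_jiffies() rounds up, so with
 * HZ=1000 the 2 ms MAX_SOFTIRQ_TIME budget is two jiffies, while with HZ=100
 * it becomes a single 10 ms jiffy. If jiffies stops advancing altogether
 * (e.g. during stop_machine()), only MAX_SOFTIRQ_RESTART bounds the loop.
 */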
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
asmlinkage __visible void __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	bool in_hardirq;
	__u32 pending;

	/*
	 * Mask out PF_MEMALLOC as current task context is borrowed for the
	 * softirq. A softirq handler such as network RX might set PF_MEMALLOC
	 * again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	handle_pending_softirqs(pending, 1);

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}
/*
 * This function must run with irqs disabled!
 */
void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
static inline void local_bh_disable_nort(void) { local_bh_disable(); }
static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
static void ksoftirqd_set_sched_params(unsigned int cpu) { }
static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }

#else /* !PREEMPT_RT_FULL */

/*
 * On RT we serialize softirq execution with a cpu local lock per softirq
 */
static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);

void __init softirq_early_init(void)
{
	int i;

	for (i = 0; i < NR_SOFTIRQS; i++)
		local_irq_lock_init(local_softirq_locks[i]);
}

static void lock_softirq(int which)
{
	local_lock(local_softirq_locks[which]);
}

static void unlock_softirq(int which)
{
	local_unlock(local_softirq_locks[which]);
}
static void do_single_softirq(int which, int need_rcu_bh_qs)
{
	unsigned long old_flags = current->flags;

	current->flags &= ~PF_MEMALLOC;
	vtime_account_irq_enter(current);
	current->flags |= PF_IN_SOFTIRQ;
	lockdep_softirq_enter();
	local_irq_enable();
	handle_softirq(which);
	local_irq_disable();
	lockdep_softirq_exit();
	current->flags &= ~PF_IN_SOFTIRQ;
	vtime_account_irq_enter(current);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
/*
 * Called with interrupts disabled. Process softirqs which were raised
 * in current context (or on behalf of ksoftirqd).
 */
static void do_current_softirqs(int need_rcu_bh_qs)
{
	while (current->softirqs_raised) {
		int i = __ffs(current->softirqs_raised);
		unsigned int pending, mask = (1U << i);

		current->softirqs_raised &= ~mask;
		local_irq_enable();

		/*
		 * If the lock is contended, we boost the owner to
		 * process the softirq or leave the critical section.
		 */
		lock_softirq(i);
		local_irq_disable();
		softirq_set_runner(i);
		/*
		 * Check with the local_softirq_pending() bits,
		 * whether we need to process this still or if someone
		 * else took care of it.
		 */
		pending = local_softirq_pending();
		if (pending & mask) {
			set_softirq_pending(pending & ~mask);
			do_single_softirq(i, need_rcu_bh_qs);
		}
		softirq_clr_runner(i);
		unlock_softirq(i);
		WARN_ON(current->softirq_nestcnt != 1);
	}
}
static void __local_bh_disable(void)
{
	if (++current->softirq_nestcnt == 1)
		migrate_disable();
}

void local_bh_disable(void)
{
	__local_bh_disable();
}
EXPORT_SYMBOL(local_bh_disable);

void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	__local_bh_disable();
	if (cnt & PREEMPT_CHECK_OFFSET)
		preempt_disable();
}

static void __local_bh_enable(void)
{
	if (WARN_ON(current->softirq_nestcnt == 0))
		return;

	local_irq_disable();
	if (current->softirq_nestcnt == 1 && current->softirqs_raised)
		do_current_softirqs(1);
	local_irq_enable();

	if (--current->softirq_nestcnt == 0)
		migrate_enable();
}

void local_bh_enable(void)
{
	__local_bh_enable();
}
EXPORT_SYMBOL(local_bh_enable);
extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	if (cnt & PREEMPT_CHECK_OFFSET)
		preempt_enable();
	__local_bh_enable();
}

void local_bh_enable_ip(unsigned long ip)
{
	local_bh_enable();
}
EXPORT_SYMBOL(local_bh_enable_ip);

void _local_bh_enable(void)
{
	if (WARN_ON(current->softirq_nestcnt == 0))
		return;
	if (--current->softirq_nestcnt == 0)
		migrate_enable();
}
EXPORT_SYMBOL(_local_bh_enable);

int in_serving_softirq(void)
{
	return current->flags & PF_IN_SOFTIRQ;
}
EXPORT_SYMBOL(in_serving_softirq);
/* Called with preemption disabled */
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	current->softirq_nestcnt++;

	do_current_softirqs(1);
	current->softirq_nestcnt--;
	rcu_note_context_switch();
	local_irq_enable();
}

/*
 * Called from netif_rx_ni(). Preemption enabled, but migration
 * disabled. So the cpu can't go away under us.
 */
void thread_do_softirq(void)
{
	if (!in_serving_softirq() && current->softirqs_raised) {
		current->softirq_nestcnt++;
		do_current_softirqs(0);
		current->softirq_nestcnt--;
	}
}
static void do_raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);

	/*
	 * If we are not in a hard interrupt and inside a bh disabled
	 * region, we simply raise the flag on current. local_bh_enable()
	 * will make sure that the softirq is executed. Otherwise we
	 * delegate it to ksoftirqd.
	 */
	if (!in_irq() && current->softirq_nestcnt)
		current->softirqs_raised |= (1U << nr);
	else if (__this_cpu_read(ksoftirqd))
		__this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	do_raise_softirq_irqoff(nr);
	if (!in_irq() && !current->softirq_nestcnt)
		wakeup_softirqd();
}

/*
 * This function must run with irqs disabled!
 */
void raise_softirq_irqoff(unsigned int nr)
{
	do_raise_softirq_irqoff(nr);

	/*
	 * If we're in a hard interrupt we let the irq return code deal
	 * with the wakeup of ksoftirqd.
	 */
	if (in_irq())
		return;

	/*
	 * If we are in thread context but outside of a bh disabled
	 * region, we need to wake ksoftirqd as well.
	 *
	 * CHECKME: Some of the places which do that could be wrapped
	 * into local_bh_disable/enable pairs. Though it's unclear
	 * whether this is worth the effort. To find those places just
	 * raise a WARN() if the condition is met.
	 */
	if (!current->softirq_nestcnt)
		wakeup_softirqd();
}
static inline int ksoftirqd_softirq_pending(void)
{
	return current->softirqs_raised;
}

static inline void local_bh_disable_nort(void) { }
static inline void _local_bh_enable_nort(void) { }

static inline void ksoftirqd_set_sched_params(unsigned int cpu)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler(current, SCHED_FIFO, &param);
	/* Take over all pending softirqs when starting */
	local_irq_disable();
	current->softirqs_raised = local_softirq_pending();
	local_irq_enable();
}

static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
{
	struct sched_param param = { .sched_priority = 0 };

	sched_setscheduler(current, SCHED_NORMAL, &param);
}

#endif /* PREEMPT_RT_FULL */
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable_nort();
		tick_irq_enter();
		_local_bh_enable_nort();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
#ifndef CONFIG_PREEMPT_RT_FULL
	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
#else /* PREEMPT_RT_FULL */
	unsigned long flags;

	local_irq_save(flags);
	if (__this_cpu_read(ksoftirqd) &&
	    __this_cpu_read(ksoftirqd)->softirqs_raised)
		wakeup_softirqd();
	local_irq_restore(flags);
#endif
}
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
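/*
 * Usage sketch (hypothetical subsystem, for illustration only): a subsystem
 * registers its handler once at init time and raises the softirq whenever
 * there is work for it, typically from its hard interrupt handler:
 *
 *	static void foo_do_softirq(struct softirq_action *h)
 *	{
 *		... drain the per-cpu work queued by the irq handler ...
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_do_softirq);
 *	...
 *	raise_softirq(FOO_SOFTIRQ);
 *
 * FOO_SOFTIRQ and foo_do_softirq() are made-up names; real softirq numbers
 * are limited to the fixed set listed in softirq_to_name[] above.
 */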
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
static inline void
__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
{
	if (tasklet_trylock(t)) {
again:
		/* We may have been preempted before tasklet_trylock
		 * and __tasklet_action may have already run.
		 * So double check the sched bit while the tasklet
		 * is locked before adding it to the list.
		 */
		if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
			t->next = NULL;
			*head->tail = t;
			head->tail = &(t->next);
			raise_softirq_irqoff(nr);
			tasklet_unlock(t);
		} else {
			/* This is subtle. If we hit the corner case above,
			 * it is possible that we get preempted right here,
			 * and another task has successfully called
			 * tasklet_schedule(), then this function, and
			 * failed on the trylock. Thus we must be sure
			 * before releasing the tasklet lock, that the
			 * SCHED_BIT is clear. Otherwise the tasklet
			 * may get its SCHED_BIT set, but not added to the
			 * list.
			 */
			if (!tasklet_tryunlock(t))
				goto again;
		}
	}
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	__tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	__tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	__tasklet_hi_schedule(t);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

void tasklet_enable(struct tasklet_struct *t)
{
	if (!atomic_dec_and_test(&t->count))
		return;
	if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
		tasklet_schedule(t);
}
EXPORT_SYMBOL(tasklet_enable);
static void __tasklet_action(struct softirq_action *a,
			     struct tasklet_struct *list)
{
	int loops = 1000000;

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		/*
		 * Should always succeed - after a tasklet got on the
		 * list (after getting the SCHED bit set from 0 to 1),
		 * nothing but the tasklet softirq it got queued to can
		 * lock it:
		 */
		if (!tasklet_trylock(t)) {
			WARN_ON(1);
			continue;
		}

		t->next = NULL;

		/*
		 * If we cannot handle the tasklet because it's disabled,
		 * mark it as pending. tasklet_enable() will later
		 * re-schedule the tasklet.
		 */
		if (unlikely(atomic_read(&t->count))) {
out_disabled:
			/* implicit unlock: */
			wmb();
			t->state = TASKLET_STATEF_PENDING;
			continue;
		}

		/*
		 * After this point on the tasklet might be rescheduled
		 * on another CPU, but it can only be added to another
		 * CPU's tasklet list if we unlock the tasklet (which we
		 * do not do yet).
		 */
		if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
			WARN_ON(1);

again:
		t->func(t->data);

		/*
		 * Try to unlock the tasklet. We must use cmpxchg, because
		 * another CPU might have scheduled or disabled the tasklet.
		 * We only allow the STATE_RUN -> 0 transition here.
		 */
		while (!tasklet_tryunlock(t)) {
			/*
			 * If it got disabled meanwhile, bail out:
			 */
			if (atomic_read(&t->count))
				goto out_disabled;
			/*
			 * If it got scheduled meanwhile, re-execute
			 * the tasklet function:
			 */
			if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
				goto again;
			if (!--loops) {
				printk("hm, tasklet state: %08lx\n", t->state);
				WARN_ON(1);
				tasklet_unlock(t);
				break;
			}
		}
	}
}

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	__tasklet_action(a, list);
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	__tasklet_action(a, list);
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
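/*
 * Usage sketch (hypothetical driver, for illustration only): a driver
 * typically initializes its tasklet once, schedules it from the hard
 * interrupt handler and kills it on teardown:
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_dev *foo = (struct foo_dev *)data;
 *		...
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *	...
 *	tasklet_schedule(&foo->tasklet);
 *	...
 *	tasklet_kill(&foo->tasklet);
 *
 * struct foo_dev and foo_tasklet_fn() are made-up names for this sketch.
 */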
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			msleep(1);
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
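/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	static enum hrtimer_restart foo_timeout(struct hrtimer *t)
 *	{
 *		... runs in softirq context via the tasklet trampoline ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&foo->timer, foo_timeout,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->timer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 *
 * foo and foo_timeout() are made up; tasklet_hrtimer_start() is the
 * <linux/interrupt.h> helper that arms the underlying hrtimer.
 */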
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		/*
		 * Hack for now to avoid this busy-loop:
		 */
#ifdef CONFIG_PREEMPT_RT_FULL
		msleep(1);
#else
		barrier();
#endif
	}
}
EXPORT_SYMBOL(tasklet_unlock_wait);
#endif

static int ksoftirqd_should_run(unsigned int cpu)
{
	return ksoftirqd_softirq_pending();
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb, unsigned long action,
			void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.setup			= ksoftirqd_set_sched_params,
	.cleanup		= ksoftirqd_clr_sched_params,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}