#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/swork.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
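
/*
 * Illustrative sketch (not part of the original header): requesting an
 * edge-triggered line with one of the IRQF_TRIGGER_* flags above. The
 * button_* names and the IRQ number are hypothetical.
 */
#if 0	/* example only, not compiled */
static irqreturn_t button_handler(int irq, void *dev_id)
{
	/* Acknowledge the device here, then report the IRQ as handled. */
	return IRQ_HANDLED;
}

static int button_setup(unsigned int button_irq, void *button_dev)
{
	/* Fire on a rising edge; omitting IRQF_TRIGGER_* keeps the
	 * trigger "as already configured" by firmware. */
	return request_irq(button_irq, button_handler, IRQF_TRIGGER_RISING,
			   "button", button_dev);
}
#endif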
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.txt
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_SOFTIRQ_CALL	0x00080000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
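
/*
 * Illustrative sketch (not part of the original header): combining the
 * flags above for a shared, level-triggered line. With IRQF_SHARED a
 * unique dev_id cookie is mandatory, and the handler must return
 * IRQ_NONE when its device did not raise the interrupt. The foo_*
 * names are hypothetical.
 */
#if 0	/* example only, not compiled */
static irqreturn_t foo_handler(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;	/* someone else's interrupt */

	foo_ack_irq(foo);
	return IRQ_HANDLED;
}

static int foo_setup(unsigned int irq, struct foo_device *foo)
{
	return request_irq(irq, foo_handler, IRQF_SHARED | IRQF_TRIGGER_LOW,
			   "foo", foo);
}
#endif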
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);
/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
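
/*
 * Illustrative sketch (not part of the original header): splitting work
 * between a hardirq handler and a handler thread. IRQF_ONESHOT keeps
 * the line masked until the thread function returns. The bar_* names
 * are hypothetical.
 */
#if 0	/* example only, not compiled */
static irqreturn_t bar_quick_check(int irq, void *dev_id)
{
	if (!bar_irq_pending(dev_id))
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;		/* defer the heavy work */
}

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	bar_process_data(dev_id);	/* runs in a kthread, may sleep */
	return IRQ_HANDLED;
}

static int bar_setup(unsigned int irq, struct bar_device *bar)
{
	return request_threaded_irq(irq, bar_quick_check, bar_thread_fn,
				    IRQF_ONESHOT, "bar", bar);
}
#endif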
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
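
/*
 * Illustrative sketch (not part of the original header): the devm_
 * variants tie the IRQ's lifetime to a struct device, so no explicit
 * free_irq() is needed on error paths or at driver detach. The baz_*
 * names and handler are hypothetical.
 */
#if 0	/* example only, not compiled */
static int baz_probe(struct platform_device *pdev)
{
	struct baz_device *baz = devm_kzalloc(&pdev->dev, sizeof(*baz),
					      GFP_KERNEL);
	int irq = platform_get_irq(pdev, 0);

	if (!baz)
		return -ENOMEM;
	if (irq < 0)
		return irq;

	/* Released automatically when the device is unbound. */
	return devm_request_irq(&pdev->dev, irq, baz_handler, 0,
				dev_name(&pdev->dev), baz);
}
#endif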
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * level triggered).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable_nort()
#endif
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @swork:	Swork item, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
#ifdef CONFIG_PREEMPT_RT_BASE
	struct swork_event swork;
#else
	struct work_struct work;
#endif
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
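
/*
 * Illustrative sketch (not part of the original header): pinning an
 * interrupt to a single CPU via the helpers above, after checking that
 * the irq chip supports affinity at all.
 */
#if 0	/* example only, not compiled */
static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	/* Fails if @cpu is offline; irq_force_affinity() skips that check. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}
#endif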
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct cpumask *
irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
{
	return NULL;
}

static inline int
irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
{
	return maxvec;
}

#endif /* CONFIG_SMP */
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs which know
 * that a particular irq context is disabled and is the only
 * irq-context user of a lock, so that it's safe to take
 * the lock in the irq-disabled section without disabling
 * hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
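
/*
 * Illustrative sketch (not part of the original header): marking a
 * device interrupt as a system wakeup source across suspend. The
 * enable/disable calls must be balanced; qux_* names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int qux_suspend(struct device *dev)
{
	struct qux_device *qux = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(qux->irq);
	return 0;
}

static int qux_resume(struct device *dev)
{
	struct qux_device *qux = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(qux->irq);
	return 0;
}
#endif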
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
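
/*
 * Illustrative sketch (not part of the original header): peeking at the
 * irqchip to see whether a line is pending without handling it, e.g.
 * for diagnostics. Not every irqchip implements this query.
 */
#if 0	/* example only, not compiled */
static bool irq_line_is_pending(unsigned int irq)
{
	bool pending = false;

	/* Returns 0 on success, a negative errno if unsupported. */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;

	return pending;
}
#endif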
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifndef CONFIG_PREEMPT_RT_BASE
extern bool force_irqthreads;
# else
#  define force_irqthreads	(true)
# endif
#else
#define force_irqthreads	(false)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif
/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif
/* PLEASE avoid allocating new softirqs unless you _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,	/* Unused, but kept as tools rely on the
				   numbering. Sigh! */
	RCU_SOFTIRQ,		/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
static inline void thread_do_softirq(void) { do_softirq(); }
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif
#else
extern void thread_do_softirq(void);
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
#ifdef CONFIG_PREEMPT_RT_FULL
extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
#else
static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
{
	__raise_softirq_irqoff(nr);
}
#endif

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void softirq_check_pending_idle(void);
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
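
/*
 * Illustrative sketch (not part of the original header): the canonical
 * open_softirq()/raise_softirq() pattern. FOO_SOFTIRQ is hypothetical;
 * per the comment above, new softirqs should almost never be added.
 */
#if 0	/* example only, not compiled */
static void foo_softirq_action(struct softirq_action *h)
{
	/* Runs in softirq context; must not sleep. */
}

static int __init foo_init(void)
{
	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
	return 0;
}

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* From the hot path: mark the softirq pending on this CPU. */
	raise_softirq(FOO_SOFTIRQ);
	return IRQ_HANDLED;
}
#endif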
/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU, it is rescheduled
     for later.
   * Schedule must not be called from the tasklet itself (a lockup occurs).
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
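
/*
 * Illustrative sketch (not part of the original header): a tasklet used
 * to defer work out of a hardirq handler. Per the properties above, the
 * function runs on one CPU at a time and must not sleep. The mydev_*
 * names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void mydev_do_work(unsigned long data)
{
	/* Softirq context: must not sleep. @data comes from the
	 * DECLARE_TASKLET() initializer (or from tasklet_init()). */
}

static DECLARE_TASKLET(mydev_tasklet, mydev_do_work, 0);

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	/* Scheduling an already-scheduled tasklet is a no-op. */
	tasklet_schedule(&mydev_tasklet);
	return IRQ_HANDLED;
}
#endif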
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
	TASKLET_STATE_PENDING	/* Tasklet is pending */
};

#define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
#define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
#define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline int tasklet_tryunlock(struct tasklet_struct *t)
{
	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

extern void tasklet_unlock_wait(struct tasklet_struct *t);

#else
#define tasklet_trylock(t)	1
#define tasklet_tryunlock(t)	1
#define tasklet_unlock_wait(t)	do { } while (0)
#define tasklet_unlock(t)	do { } while (0)
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}
extern void tasklet_enable(struct tasklet_struct *t);
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			   const enum hrtimer_mode mode)
{
	hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
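
/*
 * Illustrative sketch (not part of the original header): a
 * tasklet_hrtimer runs its callback from tasklet (softirq) context
 * rather than hard hrtimer context. Names are hypothetical and the
 * 10ms delay is chosen arbitrarily.
 */
#if 0	/* example only, not compiled */
static struct tasklet_hrtimer poll_timer;

static enum hrtimer_restart poll_fn(struct hrtimer *t)
{
	/* Invoked via a tasklet, not directly in hardirq context. */
	return HRTIMER_NORESTART;
}

static void poll_start(void)
{
	tasklet_hrtimer_init(&poll_timer, poll_fn,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&poll_timer, ms_to_ktime(10),
			      HRTIMER_MODE_REL);
}
#endif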
#ifdef CONFIG_PREEMPT_RT_FULL
extern void softirq_early_init(void);
#else
static inline void softirq_early_init(void) { }
#endif
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
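
/*
 * Illustrative sketch (not part of the original header): the probing
 * recipe from the comment above, condensed. The foo_* device helpers
 * and the 20ms settle time are hypothetical.
 */
#if 0	/* example only, not compiled */
static int foo_probe_irq(struct foo_device *foo)
{
	unsigned long irqs;
	int irq;

	foo_mask_irq(foo);		/* step 1: quiesce the device  */
	irqs = probe_irq_on();		/* step 3: claim idle IRQs     */
	foo_trigger_irq(foo);		/* step 4: make it interrupt   */
	msleep(20);			/* step 5: wait for the IRQ    */
	irq = probe_irq_off(irqs);	/* step 6: 0 = none, <0 = many */
	foo_ack_irq(foo);		/* step 7: clear pending state */

	return irq > 0 ? irq : -ENODEV;
}
#endif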
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#define __irq_entry		__attribute__((__section__(".irqentry.text")))
#define __softirq_entry \
	__attribute__((__section__(".softirqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
/* Limits of softirq entrypoints */
extern char __softirqentry_text_start[];
extern char __softirqentry_text_end[];

#else
#define __irq_entry
#define __softirq_entry
#endif

#endif /* _LINUX_INTERRUPT_H */