X-Git-Url: https://rtime.felk.cvut.cz/gitweb/zynq/linux.git/blobdiff_plain/b450e900fdb473a53613ad014f31eedbc80b1c90..aebcc440d27130c7bfb8868bc7246698e76d5f45:/kernel/irq/manage.c

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6b669593e7eb..e357bf6c59d5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,6 +22,7 @@
 #include "internals.h"
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
 __read_mostly bool force_irqthreads;
 
 static int __init setup_forced_irqthreads(char *arg)
@@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg)
 	return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
+# endif
 #endif
 
 static void __synchronize_hardirq(struct irq_desc *desc)
@@ -233,7 +235,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 
 	if (desc->affinity_notify) {
 		kref_get(&desc->affinity_notify->kref);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+		swork_queue(&desc->affinity_notify->swork);
+#else
 		schedule_work(&desc->affinity_notify->work);
+#endif
 	}
 	irqd_set(data, IRQD_AFFINITY_SET);
 
@@ -271,10 +278,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
-static void irq_affinity_notify(struct work_struct *work)
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
 {
-	struct irq_affinity_notify *notify =
-		container_of(work, struct irq_affinity_notify, work);
 	struct irq_desc *desc = irq_to_desc(notify->irq);
 	cpumask_var_t cpumask;
 	unsigned long flags;
@@ -296,6 +301,35 @@ out:
 	kref_put(&notify->kref, notify->release);
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void init_helper_thread(void)
+{
+	static int init_sworker_once;
+
+	if (init_sworker_once)
+		return;
+	if (WARN_ON(swork_get()))
+		return;
+	init_sworker_once = 1;
+}
+
+static void irq_affinity_notify(struct swork_event *swork)
+{
+	struct irq_affinity_notify *notify =
+		container_of(swork, struct irq_affinity_notify, swork);
+	_irq_affinity_notify(notify);
+}
+
+#else
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	_irq_affinity_notify(notify);
+}
+#endif
+
 /**
  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
  *	@irq:		Interrupt for which to enable/disable notification
@@ -324,7 +358,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	if (notify) {
 		notify->irq = irq;
 		kref_init(&notify->kref);
+#ifdef CONFIG_PREEMPT_RT_BASE
+		INIT_SWORK(&notify->swork, irq_affinity_notify);
+		init_helper_thread();
+#else
 		INIT_WORK(&notify->work, irq_affinity_notify);
+#endif
 	}
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@@ -879,7 +918,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 	local_bh_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action);
-	local_bh_enable();
+	/*
+	 * Interrupts which have real time requirements can be set up
+	 * to avoid softirq processing in the thread handler. This is
+	 * safe as these interrupts do not raise soft interrupts.
+	 */
+	if (irq_settings_no_softirq_call(desc))
+		_local_bh_enable();
+	else
+		local_bh_enable();
 	return ret;
 }
 
@@ -976,6 +1023,12 @@ static int irq_thread(void *data)
 		if (action_ret == IRQ_WAKE_THREAD)
 			irq_wake_secondary(desc, action);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		migrate_disable();
+		add_interrupt_randomness(action->irq, 0,
+				 desc->random_ip ^ (unsigned long) action);
+		migrate_enable();
+#endif
 		wake_threads_waitq(desc);
 	}
 
@@ -1336,6 +1389,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 	}
 
+	if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+		irq_settings_set_no_softirq_call(desc);
+
 	/* Set default affinity mask once everything is setup */
 	setup_affinity(desc, mask);
 
@@ -2061,7 +2117,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
  *	This call sets the internal irqchip state of an interrupt,
  *	depending on the value of @which.
  *
- *	This function should be called with preemption disabled if the
+ *	This function should be called with migration disabled if the
  *	interrupt controller has per-cpu registers.
  */
 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
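
Note on the swork conversion in the affinity-notify hunks above: on PREEMPT_RT, schedule_work() takes locks that become sleeping locks, so it cannot be called under the raw desc->lock; the notification is therefore queued to the RT tree's simple-work (swork) helper thread. A minimal sketch of the same pattern, assuming only the swork API as it appears in this diff (struct swork_event, INIT_SWORK(), swork_get(), swork_queue()) and a <linux/swork.h> header; the my_* names are illustrative, not part of the patch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/swork.h>

static struct swork_event my_event;

/* Runs in the swork helper kthread, which is preemptible on RT. */
static void my_event_fn(struct swork_event *sev)
{
	/* deferred processing goes here */
}

static int my_setup(void)
{
	int err;

	/* Bring up the helper thread once, as init_helper_thread() does. */
	err = swork_get();
	if (WARN_ON(err))
		return err;
	INIT_SWORK(&my_event, my_event_fn);
	return 0;
}

static void my_trigger(void)
{
	/* Unlike schedule_work() on RT, this is safe under a raw spinlock. */
	swork_queue(&my_event);
}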
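The IRQF_NO_SOFTIRQ_CALL handling added to __setup_irq() and irq_forced_thread_fn() lets latency-critical interrupts skip pending-softirq processing when their force-threaded handler finishes: _local_bh_enable() only drops the BH count, whereas local_bh_enable() would also run pending softirqs. A hypothetical driver would request such an interrupt as sketched below; per the comment in the patch, the handler must never raise softirqs itself. Device name, IRQ number, and function names are made up for illustration:

#include <linux/errno.h>
#include <linux/interrupt.h>

static irqreturn_t my_rt_handler(int irq, void *dev_id)
{
	/*
	 * Force-threaded on RT, so this runs via irq_forced_thread_fn().
	 * It must not raise softirqs: they would not be processed when
	 * the thread re-enables bottom halves via _local_bh_enable().
	 */
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev)
{
	/*
	 * IRQF_NO_SOFTIRQ_CALL makes __setup_irq() mark the descriptor
	 * via irq_settings_set_no_softirq_call(), so the irq thread
	 * finishes with _local_bh_enable() instead of local_bh_enable().
	 */
	return request_irq(irq, my_rt_handler, IRQF_NO_SOFTIRQ_CALL,
			   "my-rt-device", dev);
}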
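The final hunk relaxes the kerneldoc for irq_set_irqchip_state() from "preemption disabled" to "migration disabled", matching RT semantics: the caller only needs to stay on one CPU while accessing per-CPU irqchip registers, and migrate_disable() guarantees that without disabling preemption. A sketch of a compliant caller, assuming migrate_disable()/migrate_enable() as provided by the RT tree; the helper name is hypothetical:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/preempt.h>

static int mark_irq_pending_here(unsigned int irq)
{
	int err;

	migrate_disable();	/* pinned to this CPU, but still preemptible */
	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
	migrate_enable();

	return err;
}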