softirq-local-lock.patch
author    Thomas Gleixner <tglx@linutronix.de>
          Tue, 28 Jun 2011 13:57:18 +0000 (15:57 +0200)
committer Michal Sojka <sojka@merica.cz>
          Sun, 13 Sep 2015 07:47:19 +0000 (09:47 +0200)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/bottom_half.h
include/linux/interrupt.h
include/linux/preempt_mask.h
include/linux/sched.h
init/main.c
kernel/softirq.c

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 86c12c93e3cf6ce9c1db4085b9ee986d2c1e5b9a..8ca9389352f2b74e00663669b7dea2699c59074c 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -4,6 +4,17 @@
 #include <linux/preempt.h>
 #include <linux/preempt_mask.h>
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+extern void local_bh_disable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+#else
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
 #else
@@ -31,5 +42,6 @@ static inline void local_bh_enable(void)
 {
        __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
+#endif
 
 #endif /* _LINUX_BH_H */
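
For orientation, a minimal caller-side sketch of the semantics behind the prototypes above (illustrative only, not part of the patch; example_bh_user() is made up, the API calls are the real ones). The interface is unchanged for users: sections nest, and on RT only the outermost local_bh_enable() may end up executing pending softirqs, in the caller's own context and serialized by the per-CPU lock added in kernel/softirq.c below.

#include <linux/bottom_half.h>
#include <linux/interrupt.h>		/* raise_softirq(), NET_RX_SOFTIRQ */

/* Illustrative only: a process-context user of the unchanged API. */
static void example_bh_user(void)
{
	local_bh_disable();		/* RT: migrate_disable(), softirq_nestcnt 0 -> 1 */
	local_bh_disable();		/* nested section, softirq_nestcnt 1 -> 2 */
	raise_softirq(NET_RX_SOFTIRQ);	/* set the pending bit; may wake ksoftirqd */
	local_bh_enable();		/* inner enable: nestcnt 2 -> 1, nothing runs here */
	local_bh_enable();		/* outermost enable: if still pending, the softirq
					 * may run right here, under local_softirq_lock */
}
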
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5bb9e19b3441dae1d5b51ae73cfe3b8b49acc901..1e01ddc72cd1a8da529528e1a7fccadc858ae369 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -430,7 +430,11 @@ struct softirq_action
 
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
+#ifndef CONFIG_PREEMPT_RT_FULL
 static inline void thread_do_softirq(void) { do_softirq(); }
+#else
+extern void thread_do_softirq(void);
+#endif
 #ifdef __ARCH_HAS_DO_SOFTIRQ
 void do_softirq_own_stack(void);
 #else
@@ -599,6 +603,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
        tasklet_kill(&ttimer->tasklet);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
 /*
  * Autoprobing for irqs:
  *
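
A hypothetical process-context caller of the thread_do_softirq() hook declared above (not from the patch; kick_rx_processing() is made up). Per the implementation further down, the hook maps to plain do_softirq() on !RT, while on RT it processes pending softirqs under the per-CPU softirq lock and may block on it, so it is meant for preemptible process context, as the "Called from netif_rx_ni()" comment in kernel/softirq.c indicates.

#include <linux/interrupt.h>

/* Hypothetical producer that wants its softirq handled immediately. */
static void kick_rx_processing(void)
{
	raise_softirq(NET_RX_SOFTIRQ);	/* mark work pending */
	thread_do_softirq();		/* !RT: do_softirq(); RT: run it now,
					 * serialized via local_softirq_lock */
}
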
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index dbeec4d4a3beafb0d6123944f108981879c3137e..c7e373dc73143ad6b01197c904885b406bae185f 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET     (1UL << NMI_SHIFT)
 
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET        (2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET        (0)
+#endif
 
 #define PREEMPT_ACTIVE_BITS    1
 #define PREEMPT_ACTIVE_SHIFT   (NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
 
 #define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
-#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
 #define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
                                 | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count()       (preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq()  (softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count()       (0UL)
+extern int in_serving_softirq(void);
+#endif
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@@ -64,7 +74,6 @@
 #define in_irq()               (hardirq_count())
 #define in_softirq()           (softirq_count())
 #define in_interrupt()         (irq_count())
-#define in_serving_softirq()   (softirq_count() & SOFTIRQ_OFFSET)
 
 /*
  * Are we in NMI context?
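
Two caller-visible consequences of the preempt_mask.h change above, for orientation: with softirq_count() hard-wired to 0 on RT, in_softirq() and in_interrupt() no longer report bh-disabled sections, and in_serving_softirq() becomes a real function answered by the local_softirq_runner bookkeeping in kernel/softirq.c rather than by preempt_count() bits. A sketch of the kind of check that keeps working on both configurations (illustrative; the request type and helpers are made up):

#include <linux/hardirq.h>		/* in_serving_softirq(), via preempt_mask.h */
#include <linux/workqueue.h>

struct my_request {			/* hypothetical request type */
	struct work_struct	finish_work;
};

static void my_finish_sync(struct my_request *req) { /* heavy completion work */ }

static void my_complete(struct my_request *req)
{
	if (in_serving_softirq())
		/* softirq context: keep it short, defer the heavy part */
		queue_work(system_wq, &req->finish_work);
	else
		my_finish_sync(req);	/* process context: finish inline */
}
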
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0a597d5b0250a0a26d09d5e1aef936d3fa648925..d4c708d9ebf243793acb8012e0f243d072600a12 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1782,6 +1782,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_PREEMPT_RT_BASE
        struct rcu_head put_rcu;
+       int softirq_nestcnt;
 #endif
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long   task_state_change;
diff --git a/init/main.c b/init/main.c
index 6f0f1c5ff8cc82c41b36f107f68732003c5b1dc3..45708159e2e3a70cca3c68d7405edfb53ff43964 100644
--- a/init/main.c
+++ b/init/main.c
@@ -514,6 +514,7 @@ asmlinkage __visible void __init start_kernel(void)
  * Interrupts are still disabled. Do necessary setups, then
  * enable them
  */
+       softirq_early_init();
        boot_cpu_init();
        page_address_init();
        pr_notice("%s", linux_banner);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0e2dd39f4e2a0eb6da62a8bc32db410deb12908e..63d427becc9e4befe47c708bc506123ac12ef2a8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/smpboot.h>
 #include <linux/tick.h>
+#include <linux/locallock.h>
 #include <linux/irq.h>
 
 #define CREATE_TRACE_POINTS
@@ -177,6 +178,7 @@ static void handle_pending_softirqs(u32 pending)
        local_irq_disable();
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -388,6 +390,182 @@ asmlinkage __visible void do_softirq(void)
        local_irq_restore(flags);
 }
 
+static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+
+#else /* !PREEMPT_RT_FULL */
+
+/*
+ * On RT we serialize softirq execution with a cpu local lock
+ */
+static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+asmlinkage void __do_softirq(void);
+
+void __init softirq_early_init(void)
+{
+       local_irq_lock_init(local_softirq_lock);
+}
+
+static void __local_bh_disable(void)
+{
+       migrate_disable();
+       current->softirq_nestcnt++;
+}
+
+void local_bh_disable(void)
+{
+       __local_bh_disable();
+}
+EXPORT_SYMBOL(local_bh_disable);
+
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+       __local_bh_disable();
+       if (cnt & PREEMPT_CHECK_OFFSET)
+               preempt_disable();
+}
+
+static void __local_bh_enable(void)
+{
+       if (WARN_ON(current->softirq_nestcnt == 0))
+               return;
+
+       if ((current->softirq_nestcnt == 1) &&
+           local_softirq_pending() &&
+           local_trylock(local_softirq_lock)) {
+
+               local_irq_disable();
+               if (local_softirq_pending())
+                       __do_softirq();
+               local_irq_enable();
+               local_unlock(local_softirq_lock);
+               WARN_ON(current->softirq_nestcnt != 1);
+       }
+       current->softirq_nestcnt--;
+       migrate_enable();
+}
+
+void local_bh_enable(void)
+{
+       __local_bh_enable();
+}
+EXPORT_SYMBOL(local_bh_enable);
+
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+       __local_bh_enable();
+       if (cnt & PREEMPT_CHECK_OFFSET)
+               preempt_enable();
+}
+
+void local_bh_enable_ip(unsigned long ip)
+{
+       local_bh_enable();
+}
+EXPORT_SYMBOL(local_bh_enable_ip);
+
+/* For tracing */
+int notrace __in_softirq(void)
+{
+       if (__this_cpu_read(local_softirq_lock.owner) == current)
+               return __this_cpu_read(local_softirq_lock.nestcnt);
+       return 0;
+}
+
+int in_serving_softirq(void)
+{
+       int res;
+
+       preempt_disable();
+       res = __this_cpu_read(local_softirq_runner) == current;
+       preempt_enable();
+       return res;
+}
+EXPORT_SYMBOL(in_serving_softirq);
+
+/*
+ * Called with bh and local interrupts disabled. For full RT cpu must
+ * be pinned.
+ */
+asmlinkage void __do_softirq(void)
+{
+       u32 pending = local_softirq_pending();
+       int cpu = smp_processor_id();
+
+       current->softirq_nestcnt++;
+
+       /* Reset the pending bitmask before enabling irqs */
+       set_softirq_pending(0);
+
+       __this_cpu_write(local_softirq_runner, current);
+
+       lockdep_softirq_enter();
+
+       handle_pending_softirqs(pending, cpu);
+
+       pending = local_softirq_pending();
+       if (pending)
+               wakeup_softirqd();
+
+       lockdep_softirq_exit();
+       __this_cpu_write(local_softirq_runner, NULL);
+
+       current->softirq_nestcnt--;
+}
+
+static int __thread_do_softirq(int cpu)
+{
+       /*
+        * Prevent the current cpu from going offline.
+        * pin_current_cpu() can reenable preemption and block on the
+        * hotplug mutex. When it returns, the current cpu is
+        * pinned. It might be the wrong one, but the offline check
+        * below catches that.
+        */
+       pin_current_cpu();
+       /*
+        * If called from ksoftirqd (cpu >= 0) we need to check
+        * whether we are on the wrong cpu due to cpu offlining. If
+        * called via thread_do_softirq() no action required.
+        */
+       if (cpu >= 0 && cpu_is_offline(cpu)) {
+               unpin_current_cpu();
+               return -1;
+       }
+       preempt_enable();
+       local_lock(local_softirq_lock);
+       local_irq_disable();
+       /*
+        * We cannot switch stacks on RT as we want to be able to
+        * schedule!
+        */
+       if (local_softirq_pending())
+               __do_softirq();
+       local_unlock(local_softirq_lock);
+       unpin_current_cpu();
+       preempt_disable();
+       local_irq_enable();
+       return 0;
+}
+
+/*
+ * Called from netif_rx_ni(). Preemption enabled.
+ */
+void thread_do_softirq(void)
+{
+       if (!in_serving_softirq()) {
+               preempt_disable();
+               __thread_do_softirq(-1);
+               preempt_enable();
+       }
+}
+
+static inline void local_bh_disable_nort(void) { }
+static inline void _local_bh_enable_nort(void) { }
+
+#endif /* PREEMPT_RT_FULL */
 /*
  * Enter an interrupt context.
  */
@@ -399,9 +577,9 @@ void irq_enter(void)
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
-               local_bh_disable();
+               local_bh_disable_nort();
                tick_irq_enter();
-               _local_bh_enable();
+               _local_bh_enable_nort();
        }
 
        __irq_enter();
@@ -409,6 +587,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
        if (!force_irqthreads) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
@@ -428,6 +607,9 @@ static inline void invoke_softirq(void)
        } else {
                wakeup_softirqd();
        }
+#else
+       wakeup_softirqd();
+#endif
 }
 
 static inline void tick_irq_exit(void)
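
A note on the dependency: the RT side above builds on the "local lock" primitive from <linux/locallock.h>, introduced by a separate patch in the RT series. Inferring only from what this patch uses (DEFINE_LOCAL_IRQ_LOCK, local_lock/local_trylock/local_unlock, and the owner and nestcnt fields read in __in_softirq()), the structure it assumes looks roughly like the sketch below; the authoritative definition lives in the locallock patch. On PREEMPT_RT_FULL the embedded lock is a sleeping, rt_mutex-based lock, which is why local_bh_disable() sections become preemptible.

#include <linux/spinlock.h>
#include <linux/sched.h>

/* Rough shape assumed above; see linux/locallock.h in the RT patch queue. */
struct local_irq_lock {
	spinlock_t		lock;		/* sleeping lock on PREEMPT_RT_FULL */
	struct task_struct	*owner;		/* current holder, read by __in_softirq() */
	int			nestcnt;	/* holder's recursion depth */
};
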