Merge branch '4.0.8-rt6'
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 48d640ca1a05b8c0f83fe2b217b925a6dec69fa4..924d284ccb4fe900f9883f5c352127c9474feaf8 100644
 #include <linux/random.h>
 #include <linux/ftrace_event.h>
 #include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
+#include "../time/tick-internal.h"
 
 #include "tree.h"
 #include "rcu.h"
@@ -198,6 +203,19 @@ void rcu_sched_qs(void)
        }
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(void);
+
+void rcu_bh_qs(void)
+{
+       unsigned long flags;
+
+       /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
+       local_irq_save(flags);
+       rcu_preempt_qs();
+       local_irq_restore(flags);
+}
+#else
 void rcu_bh_qs(void)
 {
        if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
@@ -207,6 +225,7 @@ void rcu_bh_qs(void)
                __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
        }
 }
+#endif
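
Note: with CONFIG_PREEMPT_RT_FULL, softirq handlers run in preemptible task context, so the BH flavor has no quiescent state of its own; rcu_bh_qs() above simply reports a preempt-RCU quiescent state (rcu_preempt_qs() expects interrupts off, hence the local_irq_save()/restore() pair). For orientation only, here is a minimal sketch of the kind of BH read-side section such a grace period protects; the my_* names are invented and are not part of the patch.

/* Illustrative sketch, not part of the patch; my_* names are made up. */
#include <linux/rcupdate.h>

struct my_node {
	int val;
};
static struct my_node __rcu *my_ptr;

static int my_reader(void)
{
	struct my_node *p;
	int val = 0;

	rcu_read_lock_bh();		/* BH read-side critical section */
	p = rcu_dereference(my_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock_bh();
	return val;
}

On -rt the grace period that protects such a reader is the preempt-RCU grace period, which is why the BH update-side machinery further down in this diff can be compiled out.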
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
 
@@ -382,6 +401,7 @@ unsigned long rcu_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU BH batches completed thus far for debug & stats.
  */
@@ -409,6 +429,13 @@ void rcu_bh_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
+#else
+void rcu_force_quiescent_state(void)
+{
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+#endif
+
 /*
  * Show the state of the grace-period kthreads.
  */
@@ -1512,7 +1539,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
            !ACCESS_ONCE(rsp->gp_flags) ||
            !rsp->gp_kthread)
                return;
-       wake_up(&rsp->gp_wq);
+       swait_wake(&rsp->gp_wq);
 }
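
Note: this hunk, the two rcu_gp_kthread() hunks below, and the init_swait_head() change near the end of the diff move the grace-period kthread's wait queue from a standard waitqueue to the RT patch set's simple waitqueue, whose wake path is short and bounded and therefore usable from the contexts RCU wakes from on -rt. A minimal sketch of the wait/wake pattern follows; only the swait_* calls visible in this diff are taken as given, while the header name, the "struct swait_head" type and all my_* names are assumptions.

/* Sketch only, not part of the patch. */
#include <linux/wait-simple.h>		/* assumed RT simple-wait header */

static struct swait_head my_swq;	/* assumed type name */
static int my_flag;

static void my_init(void)
{
	init_swait_head(&my_swq);	/* as in rcu_init_one() below */
}

static void my_waiter(void)
{
	/* sleep until my_flag becomes non-zero */
	swait_event_interruptible(my_swq, ACCESS_ONCE(my_flag));
}

static void my_waker(void)
{
	ACCESS_ONCE(my_flag) = 1;
	swait_wake(&my_swq);		/* cf. swait_wake() in the hunk above */
}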
 
 /*
@@ -1903,7 +1930,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                               ACCESS_ONCE(rsp->gpnum),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
-                       wait_event_interruptible(rsp->gp_wq,
+                       swait_event_interruptible(rsp->gp_wq,
                                                 ACCESS_ONCE(rsp->gp_flags) &
                                                 RCU_GP_FLAG_INIT);
                        /* Locking provides needed memory barrier. */
@@ -1932,7 +1959,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                               ACCESS_ONCE(rsp->gpnum),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
-                       ret = wait_event_interruptible_timeout(rsp->gp_wq,
+                       ret = swait_event_interruptible_timeout(rsp->gp_wq,
                                        ((gf = ACCESS_ONCE(rsp->gp_flags)) &
                                         RCU_GP_FLAG_FQS) ||
                                        (!ACCESS_ONCE(rnp->qsmask) &&
@@ -2693,18 +2720,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
        struct rcu_state *rsp;
 
        if (cpu_is_offline(smp_processor_id()))
                return;
-       trace_rcu_utilization(TPS("Start RCU core"));
        for_each_rcu_flavor(rsp)
                __rcu_process_callbacks(rsp);
-       trace_rcu_utilization(TPS("End RCU core"));
 }
 
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 /*
  * Schedule RCU callback invocation.  If the specified type of RCU
  * does not support RCU priority boosting, just do a direct call,
@@ -2716,18 +2742,105 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
        if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
                return;
-       if (likely(!rsp->boost)) {
-               rcu_do_batch(rsp, rdp);
+       rcu_do_batch(rsp, rdp);
+}
+
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+       /*
+        * If the thread is yielding, only wake it when this
+        * is invoked from idle
+        */
+       if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
+               wake_up_process(t);
+}
+
+/*
+ * Wake up this CPU's rcuc kthread to do RCU core processing.
+ */
+static void invoke_rcu_core(void)
+{
+       unsigned long flags;
+       struct task_struct *t;
+
+       if (!cpu_online(smp_processor_id()))
                return;
+       local_irq_save(flags);
+       __this_cpu_write(rcu_cpu_has_work, 1);
+       t = __this_cpu_read(rcu_cpu_kthread_task);
+       if (t != NULL && current != t)
+               rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
+       local_irq_restore(flags);
+}
+
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+       per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+       return __this_cpu_read(rcu_cpu_has_work);
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
+       unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+       char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+       int spincnt;
+
+       for (spincnt = 0; spincnt < 10; spincnt++) {
+               trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+               local_bh_disable();
+               *statusp = RCU_KTHREAD_RUNNING;
+               this_cpu_inc(rcu_cpu_kthread_loops);
+               local_irq_disable();
+               work = *workp;
+               *workp = 0;
+               local_irq_enable();
+               if (work)
+                       rcu_process_callbacks();
+               local_bh_enable();
+               if (*workp == 0) {
+                       trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+                       *statusp = RCU_KTHREAD_WAITING;
+                       return;
+               }
        }
-       invoke_rcu_callbacks_kthread();
+       *statusp = RCU_KTHREAD_YIELDING;
+       trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+       schedule_timeout_interruptible(2);
+       trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+       *statusp = RCU_KTHREAD_WAITING;
 }
 
-static void invoke_rcu_core(void)
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+       .store                  = &rcu_cpu_kthread_task,
+       .thread_should_run      = rcu_cpu_kthread_should_run,
+       .thread_fn              = rcu_cpu_kthread,
+       .thread_comm            = "rcuc/%u",
+       .setup                  = rcu_cpu_kthread_setup,
+       .park                   = rcu_cpu_kthread_park,
+};
+
+/*
+ * Spawn per-CPU RCU core processing kthreads.
+ */
+static int __init rcu_spawn_core_kthreads(void)
 {
-       if (cpu_online(smp_processor_id()))
-               raise_softirq(RCU_SOFTIRQ);
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               per_cpu(rcu_cpu_has_work, cpu) = 0;
+       BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+       return 0;
 }
+early_initcall(rcu_spawn_core_kthreads);
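
Note: this hunk is the core of the change. Rather than raising RCU_SOFTIRQ, invoke_rcu_core() now marks work pending and wakes a per-CPU "rcuc/%u" kthread registered via the smpboot infrastructure, so RCU core processing runs in preemptible task context (rcu_cpu_kthread_setup(), rcu_cpu_has_work, rcu_cpu_kthread_status and rcu_cpu_kthread_loops live in tree_plugin.h and are not visible in this hunk). For reference, a stripped-down sketch of the smpboot registration pattern; every my_* name is hypothetical.

/* Sketch only, not part of the patch; my_* names are made up. */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, my_task);
static DEFINE_PER_CPU(int, my_has_work);

/* smpboot asks this (with preemption disabled) whether the thread should run */
static int my_should_run(unsigned int cpu)
{
	return __this_cpu_read(my_has_work);
}

/* the per-CPU work itself, running in the "my/%u" kthread */
static void my_thread_fn(unsigned int cpu)
{
	__this_cpu_write(my_has_work, 0);
	/* ... process this CPU's pending work here ... */
}

static struct smp_hotplug_thread my_thread_spec = {
	.store			= &my_task,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_thread_fn,
	.thread_comm		= "my/%u",
};

/* mark work pending and poke this CPU's kthread, as invoke_rcu_core() does */
static void my_kick(void)
{
	struct task_struct *t;

	preempt_disable();
	__this_cpu_write(my_has_work, 1);
	t = __this_cpu_read(my_task);
	if (t && t != current)
		wake_up_process(t);
	preempt_enable();
}

static int __init my_spawn_threads(void)
{
	return smpboot_register_percpu_thread(&my_thread_spec);
}
early_initcall(my_spawn_threads);

smpboot also parks and unparks these threads across CPU hotplug, which is why the patch only needs the small park callback above rather than its own hotplug notifier.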
 
 /*
  * Handle any core-RCU processing required by a call_rcu() invocation.
@@ -2862,6 +2975,7 @@ void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
  */
@@ -2870,6 +2984,7 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
        __call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
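
Note: on -rt the BH flavor is folded into the preempt flavor, so call_rcu_bh() above is compiled out for CONFIG_PREEMPT_RT_FULL; the rest of the RT series presumably redirects the _bh update-side API onto the plain RCU primitives in the headers (that redirection is not visible in this hunk). For context, a typical call_rcu_bh() caller looks roughly like this; my_item and its helpers are made up.

/* Sketch only, not part of the patch. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_item {
	struct rcu_head rhp;
	int data;
};

static void my_item_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_item, rhp));
}

static void my_item_retire(struct my_item *p)
{
	/* free p only once every current BH reader is done with it */
	call_rcu_bh(&p->rhp, my_item_reclaim);
}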
 
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
@@ -2961,6 +3076,7 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
@@ -2987,6 +3103,7 @@ void synchronize_rcu_bh(void)
                wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
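
Note: synchronize_rcu_bh() gets the same CONFIG_PREEMPT_RT_FULL treatment as call_rcu_bh() above. A minimal sketch of the blocking-updater pattern it serves; all my_* names are invented.

/* Sketch only, not part of the patch; updater serializes on my_lock. */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_data {
	int val;
};

static DEFINE_SPINLOCK(my_lock);
static struct my_data __rcu *my_data_ptr;

static void my_replace(struct my_data *newp)
{
	struct my_data *old;

	spin_lock(&my_lock);
	old = rcu_dereference_protected(my_data_ptr,
					lockdep_is_held(&my_lock));
	rcu_assign_pointer(my_data_ptr, newp);
	spin_unlock(&my_lock);

	synchronize_rcu_bh();		/* wait for all BH readers to finish */
	kfree(old);
}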
 
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
@@ -3499,6 +3616,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
        mutex_unlock(&rsp->barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  */
@@ -3507,6 +3625,7 @@ void rcu_barrier_bh(void)
        _rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
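
Note: rcu_barrier_bh() is likewise compiled out on -rt. Its usual job is on teardown paths, waiting for every callback queued with call_rcu_bh() to run before the code that owns them goes away; a hedged sketch with made-up names:

/* Sketch only, not part of the patch. */
#include <linux/module.h>
#include <linux/rcupdate.h>

static void __exit my_module_exit(void)
{
	/* stop queueing new call_rcu_bh() callbacks first, then: */
	rcu_barrier_bh();	/* every previously queued callback has now run */
}
module_exit(my_module_exit);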
 
 /**
  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
@@ -3834,7 +3953,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        }
 
        rsp->rda = rda;
-       init_waitqueue_head(&rsp->gp_wq);
+       init_swait_head(&rsp->gp_wq);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)
@@ -3931,7 +4050,6 @@ void __init rcu_init(void)
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
        __rcu_init_preempt();
-       open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
        /*
         * We don't need protection against CPU-hotplug here because