rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
cpu: Make hotplug.lock a "sleeping" spinlock on RT
author: Steven Rostedt <rostedt@goodmis.org>
Fri, 2 Mar 2012 15:36:57 +0000 (10:36 -0500)
committer: Michal Sojka <sojka@merica.cz>
Sun, 13 Sep 2015 07:47:39 +0000 (09:47 +0200)
Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Carsten Emde <C.Emde@osadl.org>
Cc: John Kacur <jkacur@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Clark Williams <clark.williams@gmail.com>
Cc: stable-rt@vger.kernel.org
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/cpu.c

index e40edd570e550c7e4533ce410a4d1c6c74ffbf1c..920d3ab6f88a5f187b8276362447d23d05fc7f2a 100644 (file)
@@ -58,10 +58,16 @@ static int cpu_hotplug_disabled;
 
 static struct {
        struct task_struct *active_writer;
+
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
+#ifdef CONFIG_PREEMPT_RT_FULL
+       /* Makes the lock keep the task's state */
+       spinlock_t lock;
+#else
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
+#endif
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
@@ -74,12 +80,26 @@ static struct {
 } cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+#ifdef CONFIG_PREEMPT_RT_FULL
+       .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+#else
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
 #endif
 };
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock()                rt_spin_lock(&cpu_hotplug.lock)
+# define hotplug_trylock()     rt_spin_trylock(&cpu_hotplug.lock)
+# define hotplug_unlock()      rt_spin_unlock(&cpu_hotplug.lock)
+#else
+# define hotplug_lock()                mutex_lock(&cpu_hotplug.lock)
+# define hotplug_trylock()     mutex_trylock(&cpu_hotplug.lock)
+# define hotplug_unlock()      mutex_unlock(&cpu_hotplug.lock)
+#endif
+
 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
 #define cpuhp_lock_acquire_tryread() \
@@ -116,8 +136,8 @@ retry:
                return;
        }
        preempt_enable();
-       mutex_lock(&cpu_hotplug.lock);
-       mutex_unlock(&cpu_hotplug.lock);
+       hotplug_lock();
+       hotplug_unlock();
        preempt_disable();
        goto retry;
 }
@@ -190,9 +210,9 @@ void get_online_cpus(void)
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
-       mutex_lock(&cpu_hotplug.lock);
+       hotplug_lock();
        atomic_inc(&cpu_hotplug.refcount);
-       mutex_unlock(&cpu_hotplug.lock);
+       hotplug_unlock();
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
 
@@ -200,11 +220,11 @@ bool try_get_online_cpus(void)
 {
        if (cpu_hotplug.active_writer == current)
                return true;
-       if (!mutex_trylock(&cpu_hotplug.lock))
+       if (!hotplug_trylock())
                return false;
        cpuhp_lock_acquire_tryread();
        atomic_inc(&cpu_hotplug.refcount);
-       mutex_unlock(&cpu_hotplug.lock);
+       hotplug_unlock();
        return true;
 }
 EXPORT_SYMBOL_GPL(try_get_online_cpus);
@@ -258,11 +278,11 @@ void cpu_hotplug_begin(void)
        cpuhp_lock_acquire();
 
        for (;;) {
-               mutex_lock(&cpu_hotplug.lock);
+               hotplug_lock();
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                                break;
-               mutex_unlock(&cpu_hotplug.lock);
+               hotplug_unlock();
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
@@ -271,7 +291,7 @@ void cpu_hotplug_begin(void)
 void cpu_hotplug_done(void)
 {
        cpu_hotplug.active_writer = NULL;
-       mutex_unlock(&cpu_hotplug.lock);
+       hotplug_unlock();
        cpuhp_lock_release();
 }