1 From 6932ea4614fbc294cb2213492bec79f6a0cb0b74 Mon Sep 17 00:00:00 2001
2 From: Steven Rostedt <rostedt@goodmis.org>
3 Date: Fri, 2 Mar 2012 10:36:57 -0500
4 Subject: [PATCH 218/366] cpu: Make hotplug.lock a "sleeping" spinlock on RT
6 Tasks can block on hotplug.lock in pin_current_cpu(), but their state
7 might be != RUNNING. So the mutex wakeup will set the state
8 unconditionally to RUNNING. That might cause spurious unexpected
9 wakeups. We could provide a state preserving mutex_lock() function,
10 but this is semantically backwards. So instead we convert the
11 hotplug.lock() to a spinlock for RT, which has the state preserving
12 semantics in the contended case.
14 Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
15 Cc: Carsten Emde <C.Emde@osadl.org>
16 Cc: John Kacur <jkacur@redhat.com>
17 Cc: Peter Zijlstra <peterz@infradead.org>
18 Cc: Clark Williams <clark.williams@gmail.com>
20 Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
21 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
23 kernel/cpu.c | 34 +++++++++++++++++++++++++++-------
24 1 file changed, 27 insertions(+), 7 deletions(-)
26 diff --git a/kernel/cpu.c b/kernel/cpu.c
27 index 494b606..5d8be91 100644
30 @@ -62,10 +62,16 @@ static int cpu_hotplug_disabled;
33 struct task_struct *active_writer;
35 /* wait queue to wake up the active_writer */
37 +#ifdef CONFIG_PREEMPT_RT_FULL
38 + /* Makes the lock keep the task's state */
41 /* verifies that no writer will get active while readers are active */
45 * Also blocks the new readers during
46 * an ongoing cpu hotplug operation.
47 @@ -78,12 +84,26 @@ static struct {
49 .active_writer = NULL,
50 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
51 +#ifdef CONFIG_PREEMPT_RT_FULL
52 + .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
54 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
56 #ifdef CONFIG_DEBUG_LOCK_ALLOC
57 .dep_map = {.name = "cpu_hotplug.lock" },
61 +#ifdef CONFIG_PREEMPT_RT_FULL
62 +# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
63 +# define hotplug_trylock() rt_spin_trylock(&cpu_hotplug.lock)
64 +# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
66 +# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
67 +# define hotplug_trylock() mutex_trylock(&cpu_hotplug.lock)
68 +# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
71 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
72 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
73 #define cpuhp_lock_acquire_tryread() \
74 @@ -120,8 +140,8 @@ retry:
78 - mutex_lock(&cpu_hotplug.lock);
79 - mutex_unlock(&cpu_hotplug.lock);
85 @@ -194,9 +214,9 @@ void get_online_cpus(void)
86 if (cpu_hotplug.active_writer == current)
88 cpuhp_lock_acquire_read();
89 - mutex_lock(&cpu_hotplug.lock);
91 atomic_inc(&cpu_hotplug.refcount);
92 - mutex_unlock(&cpu_hotplug.lock);
95 EXPORT_SYMBOL_GPL(get_online_cpus);
97 @@ -249,11 +269,11 @@ void cpu_hotplug_begin(void)
101 - mutex_lock(&cpu_hotplug.lock);
103 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
104 if (likely(!atomic_read(&cpu_hotplug.refcount)))
106 - mutex_unlock(&cpu_hotplug.lock);
110 finish_wait(&cpu_hotplug.wq, &wait);
111 @@ -262,7 +282,7 @@ void cpu_hotplug_begin(void)
112 void cpu_hotplug_done(void)
114 cpu_hotplug.active_writer = NULL;
115 - mutex_unlock(&cpu_hotplug.lock);
117 cpuhp_lock_release();