1 From e266671d9a91f26520462f8b969aa92ccd68d33f Mon Sep 17 00:00:00 2001
2 From: Mike Galbraith <umgwanakikbuti@gmail.com>
3 Date: Fri, 2 May 2014 13:13:34 +0200
4 Subject: [PATCH 135/365] stomp-machine: use lg_global_trylock_relax() to deal
5 with stop_cpus_lock lglock
7 If the stop machinery is called from inactive CPU we cannot use
8 lg_global_lock(), because some other stomp machine invocation might be
9 in progress and the lock can be contended. We cannot schedule from this
10 context, so use the lovely new lg_global_trylock_relax() primitive to
11 do what we used to do via one mutex_trylock()/cpu_relax() loop. We
12 now do that trylock()/relax() across an entire herd of locks. Joy.
14 Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
15 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
17 kernel/stop_machine.c | 25 +++++++++++++++----------
18 1 file changed, 15 insertions(+), 10 deletions(-)
20 diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
21 index f339141..2c5acc8 100644
22 --- a/kernel/stop_machine.c
23 +++ b/kernel/stop_machine.c
24 @@ -276,7 +276,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
25 struct cpu_stop_work work1, work2;
26 struct multi_stop_data msdata;
29 + preempt_disable_nort();
30 msdata = (struct multi_stop_data){
33 @@ -296,11 +296,11 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
36 if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
38 + preempt_enable_nort();
43 + preempt_enable_nort();
45 wait_for_stop_done(&done);
47 @@ -333,17 +333,20 @@ static DEFINE_MUTEX(stop_cpus_mutex);
49 static void queue_stop_cpus_work(const struct cpumask *cpumask,
50 cpu_stop_fn_t fn, void *arg,
51 - struct cpu_stop_done *done)
52 + struct cpu_stop_done *done, bool inactive)
54 struct cpu_stop_work *work;
58 - * Disable preemption while queueing to avoid getting
59 - * preempted by a stopper which might wait for other stoppers
60 - * to enter @fn which can lead to deadlock.
61 + * Make sure that all work is queued on all cpus before
62 + * any of the cpus can execute it.
64 - lg_global_lock(&stop_cpus_lock);
66 + lg_global_lock(&stop_cpus_lock);
68 + lg_global_trylock_relax(&stop_cpus_lock);
70 for_each_cpu(cpu, cpumask) {
71 work = &per_cpu(cpu_stopper.stop_work, cpu);
73 @@ -360,7 +363,7 @@ static int __stop_cpus(const struct cpumask *cpumask,
74 struct cpu_stop_done done;
76 cpu_stop_init_done(&done, cpumask_weight(cpumask));
77 - queue_stop_cpus_work(cpumask, fn, arg, &done);
78 + queue_stop_cpus_work(cpumask, fn, arg, &done, false);
79 wait_for_stop_done(&done);
80 return done.executed ? done.ret : -ENOENT;
82 @@ -558,6 +561,8 @@ static int __init cpu_stop_init(void)
83 INIT_LIST_HEAD(&stopper->works);
86 + lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
88 BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
89 stop_machine_unpark(raw_smp_processor_id());
90 stop_machine_initialized = true;
91 @@ -654,7 +659,7 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
92 set_state(&msdata, MULTI_STOP_PREPARE);
93 cpu_stop_init_done(&done, num_active_cpus());
94 queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
97 ret = multi_cpu_stop(&msdata);
99 /* Busy wait for completion. */