From 1e3f440359c24cda88c9bc01c192afdb588c8f0b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Jan 2016 11:25:38 +0100
Subject: [PATCH 286/366] rtmutex: Make wait_lock irq safe

Sasha reported a lockdep splat about a potential deadlock between RCU boosting
rtmutex and the posix timer it_lock.

CPU0                                    CPU1

rtmutex_lock(&rcu->rt_mutex)
  spin_lock(&rcu->rt_mutex.wait_lock)
                                        local_irq_disable()
                                        spin_lock(&timer->it_lock)
                                        spin_lock(&rcu->rt_mutex.wait_lock)
--> Interrupt
    spin_lock(&timer->it_lock)

This is caused by the following code sequence on CPU1

     rcu_read_lock()
     x = lookup();
     if (x)
        spin_lock_irqsave(&x->it_lock);
     rcu_read_unlock();
     return x;

We could fix that in the posix timer code by keeping rcu read locked across
the spinlocked and irq disabled section, but the above sequence is common and
there is no reason not to support it.

Making rt_mutex.wait_lock irq safe prevents the deadlock.

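As a minimal sketch of the resulting pattern (an illustration only, not one of
the hunks below; the helper name is made up, the locking primitives are the
stock kernel ones): every wait_lock section now runs with interrupts disabled,
so an interrupt arriving on the same CPU can no longer spin on a lock that is
already held underneath it. The nested pi_lock acquisitions can then drop
their own _irq/_irqsave variants, which is what the diff below does.

     #include <linux/spinlock.h>

     /* Hypothetical helper standing in for the slowpath functions below. */
     static void wait_lock_section(raw_spinlock_t *wait_lock)
     {
             unsigned long flags;

             /* was: raw_spin_lock(wait_lock); */
             raw_spin_lock_irqsave(wait_lock, flags);

             /* ... inspect and modify waiters / PI state ... */

             /* was: raw_spin_unlock(wait_lock); */
             raw_spin_unlock_irqrestore(wait_lock, flags);
     }
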
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/futex.c           |  18 ++---
 kernel/locking/rtmutex.c | 178 +++++++++++++++++++++++++----------------------
 2 files changed, 102 insertions(+), 94 deletions(-)

diff --git a/kernel/futex.c b/kernel/futex.c
index 67cd1f9..ad38af0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1226,7 +1226,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        if (pi_state->owner != current)
                return -EINVAL;
 
-       raw_spin_lock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
        /*
@@ -1262,22 +1262,22 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
                        ret = -EINVAL;
        }
        if (ret) {
-               raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+               raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                return ret;
        }
 
-       raw_spin_lock_irq(&pi_state->owner->pi_lock);
+       raw_spin_lock(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
-       raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+       raw_spin_unlock(&pi_state->owner->pi_lock);
 
-       raw_spin_lock_irq(&new_owner->pi_lock);
+       raw_spin_lock(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
-       raw_spin_unlock_irq(&new_owner->pi_lock);
+       raw_spin_unlock(&new_owner->pi_lock);
 
-       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
        deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
                                        &wake_sleeper_q);
@@ -2154,11 +2154,11 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * we returned due to timeout or signal without taking the
                 * rt_mutex. Too late.
                 */
-               raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
+               raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
                if (!owner)
                        owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-               raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
+               raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
                ret = fixup_pi_state_owner(uaddr, q, owner);
                goto out;
        }
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e1ddae3..a6f5326 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -111,13 +111,14 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+                                       unsigned long flags)
        __releases(lock->wait_lock)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
 
        clear_rt_mutex_waiters(lock);
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
@@ -159,11 +160,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+                                       unsigned long flags)
        __releases(lock->wait_lock)
 {
        lock->owner = NULL;
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        return true;
 }
 #endif
@@ -454,7 +456,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        int ret = 0, depth = 0;
        struct rt_mutex *lock;
        bool detect_deadlock;
-       unsigned long flags;
        bool requeue = true;
 
        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
@@ -497,7 +498,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        /*
         * [1] Task cannot go away as we did a get_task() before !
         */
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock_irq(&task->pi_lock);
 
        /*
         * [2] Get the waiter on which @task is blocked on.
@@ -581,7 +582,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * operations.
         */
        if (!raw_spin_trylock(&lock->wait_lock)) {
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock_irq(&task->pi_lock);
                cpu_relax();
                goto retry;
        }
@@ -612,7 +613,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                /*
                 * No requeue[7] here. Just release @task [8]
                 */
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock(&task->pi_lock);
                put_task_struct(task);
 
                /*
@@ -620,14 +621,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * If there is no owner of the lock, end of chain.
                 */
                if (!rt_mutex_owner(lock)) {
-                       raw_spin_unlock(&lock->wait_lock);
+                       raw_spin_unlock_irq(&lock->wait_lock);
                        return 0;
                }
 
                /* [10] Grab the next task, i.e. owner of @lock */
                task = rt_mutex_owner(lock);
                get_task_struct(task);
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
+               raw_spin_lock(&task->pi_lock);
 
                /*
                 * No requeue [11] here. We just do deadlock detection.
@@ -642,8 +643,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                top_waiter = rt_mutex_top_waiter(lock);
 
                /* [13] Drop locks */
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&task->pi_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
 
                /* If owner is not blocked, end of chain. */
                if (!next_lock)
@@ -664,7 +665,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        rt_mutex_enqueue(lock, waiter);
 
        /* [8] Release the task */
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);
 
        /*
@@ -685,14 +686,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                lock_top_waiter = rt_mutex_top_waiter(lock);
                if (prerequeue_top_waiter != lock_top_waiter)
                        rt_mutex_wake_waiter(lock_top_waiter);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }
 
        /* [10] Grab the next task, i.e. the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
 
        /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
@@ -746,8 +747,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        top_waiter = rt_mutex_top_waiter(lock);
 
        /* [13] Drop the locks */
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&task->pi_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        /*
         * Make the actual exit decisions [12], based on the stored
@@ -770,7 +771,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        goto again;
 
  out_unlock_pi:
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock_irq(&task->pi_lock);
  out_put_task:
        put_task_struct(task);
 
@@ -799,7 +800,7 @@ static inline int lock_is_stealable(struct task_struct *task,
 /*
  * Try to take an rt-mutex
  *
- * Must be called with lock->wait_lock held.
+ * Must be called with lock->wait_lock held and interrupts disabled
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
@@ -810,8 +811,6 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
                                   struct task_struct *task,
                                   struct rt_mutex_waiter *waiter, int mode)
 {
-       unsigned long flags;
-
        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
@@ -894,7 +893,7 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
         * case, but conditionals are more expensive than a redundant
         * store.
         */
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
@@ -903,7 +902,7 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
 
 takeit:
        /* We got the lock. */
@@ -979,9 +978,6 @@ static int adaptive_wait(struct rt_mutex *lock,
 }
 #endif
 
-# define pi_lock(lock)         raw_spin_lock_irq(lock)
-# define pi_unlock(lock)       raw_spin_unlock_irq(lock)
-
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                    struct rt_mutex_waiter *waiter,
                                    struct task_struct *task,
@@ -997,14 +993,15 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 {
        struct task_struct *lock_owner, *self = current;
        struct rt_mutex_waiter waiter, *top_waiter;
+       unsigned long flags;
        int ret;
 
        rt_mutex_init_waiter(&waiter, true);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                return;
        }
 
@@ -1016,10 +1013,10 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
         * as well. We are serialized via pi_lock against wakeups. See
         * try_to_wake_up().
         */
-       pi_lock(&self->pi_lock);
+       raw_spin_lock(&self->pi_lock);
        self->saved_state = self->state;
        __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-       pi_unlock(&self->pi_lock);
+       raw_spin_unlock(&self->pi_lock);
 
        ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
        BUG_ON(ret);
@@ -1032,18 +1029,18 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
                top_waiter = rt_mutex_top_waiter(lock);
                lock_owner = rt_mutex_owner(lock);
 
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
                debug_rt_mutex_print_deadlock(&waiter);
 
                if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
                        schedule();
 
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-               pi_lock(&self->pi_lock);
+               raw_spin_lock(&self->pi_lock);
                __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-               pi_unlock(&self->pi_lock);
+               raw_spin_unlock(&self->pi_lock);
        }
 
        /*
@@ -1053,10 +1050,10 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
         * happened while we were blocked. Clear saved_state so
         * try_to_wakeup() does not get confused.
         */
-       pi_lock(&self->pi_lock);
+       raw_spin_lock(&self->pi_lock);
        __set_current_state_no_track(self->saved_state);
        self->saved_state = TASK_RUNNING;
-       pi_unlock(&self->pi_lock);
+       raw_spin_unlock(&self->pi_lock);
 
        /*
         * try_to_take_rt_mutex() sets the waiter bit
@@ -1067,7 +1064,7 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
        BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
        BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        debug_rt_mutex_free_waiter(&waiter);
 }
@@ -1080,10 +1077,11 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
  */
 static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
+       unsigned long flags;
        WAKE_Q(wake_q);
        WAKE_Q(wake_sleeper_q);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        debug_rt_mutex_unlock(lock);
 
@@ -1091,13 +1089,13 @@ static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 
        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                return;
        }
 
        mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        wake_up_q(&wake_q);
        wake_up_q_sleeper(&wake_sleeper_q);
 
@@ -1273,7 +1271,7 @@ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
  *
  * Prepare waiter and propagate pi chain
  *
- * This must be called with lock->wait_lock held.
+ * This must be called with lock->wait_lock held and interrupts disabled
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                    struct rt_mutex_waiter *waiter,
@@ -1284,7 +1282,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
-       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -1298,7 +1295,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (owner == task)
                return -EDEADLK;
 
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
 
        /*
         * In the case of futex requeue PI, this will be a proxy
@@ -1310,7 +1307,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         * the task if PI_WAKEUP_INPROGRESS is set.
         */
        if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock(&task->pi_lock);
                return -EAGAIN;
        }
 
@@ -1328,12 +1325,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        task->pi_blocked_on = waiter;
 
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
 
        if (!owner)
                return 0;
 
-       raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock(&owner->pi_lock);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
@@ -1348,7 +1345,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);
 
-       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       raw_spin_unlock(&owner->pi_lock);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
@@ -1364,12 +1361,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         */
        get_task_struct(owner);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        return res;
 }
@@ -1378,16 +1375,15 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  * Remove the top waiter from the current tasks pi waiter tree and
  * queue it up.
  *
- * Called with lock->wait_lock held.
+ * Called with lock->wait_lock held and interrupts disabled.
  */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
                                     struct wake_q_head *wake_sleeper_q,
                                     struct rt_mutex *lock)
 {
        struct rt_mutex_waiter *waiter;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&current->pi_lock, flags);
+       raw_spin_lock(&current->pi_lock);
 
        waiter = rt_mutex_top_waiter(lock);
 
@@ -1409,7 +1405,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
-       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+       raw_spin_unlock(&current->pi_lock);
 
        if (waiter->savestate)
                wake_q_add(wake_sleeper_q, waiter->task);
@@ -1420,7 +1416,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and
+ * Must be called with lock->wait_lock held and interrupts disabled. I must
  * have just failed to try_to_take_rt_mutex().
  */
 static void remove_waiter(struct rt_mutex *lock,
@@ -1429,12 +1425,11 @@ static void remove_waiter(struct rt_mutex *lock,
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock = NULL;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&current->pi_lock, flags);
+       raw_spin_lock(&current->pi_lock);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
-       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+       raw_spin_unlock(&current->pi_lock);
 
        /*
         * Only update priority if the waiter was the highest priority
@@ -1443,7 +1438,7 @@ static void remove_waiter(struct rt_mutex *lock,
        if (!owner || !is_top_waiter)
                return;
 
-       raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock(&owner->pi_lock);
 
        rt_mutex_dequeue_pi(owner, waiter);
 
@@ -1456,7 +1451,7 @@ static void remove_waiter(struct rt_mutex *lock,
        if (rt_mutex_real_waiter(owner->pi_blocked_on))
                next_lock = task_blocked_on_lock(owner);
 
-       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       raw_spin_unlock(&owner->pi_lock);
 
        /*
         * Don't walk the chain, if the owner task is not blocked
@@ -1468,12 +1463,12 @@ static void remove_waiter(struct rt_mutex *lock,
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
                                   next_lock, NULL, current);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 }
 
 /*
@@ -1509,11 +1504,11 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
  * @lock:               the rt_mutex to take
  * @state:              the state the task should block in (TASK_INTERRUPTIBLE
- *                      or TASK_UNINTERRUPTIBLE)
+ *                      or TASK_UNINTERRUPTIBLE)
  * @timeout:            the pre-initialized and started timer, or NULL for none
 * @waiter:             the pre-initialized rt_mutex_waiter
 *
- * lock->wait_lock must be held by the caller.
+ * Must be called with lock->wait_lock held and interrupts disabled
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
@@ -1548,13 +1543,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                                break;
                }
 
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
 
                debug_rt_mutex_print_deadlock(waiter);
 
                schedule();
 
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
        }
 
@@ -1668,17 +1663,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct ww_acquire_ctx *ww_ctx)
 {
        struct rt_mutex_waiter waiter;
+       unsigned long flags;
        int ret = 0;
 
        rt_mutex_init_waiter(&waiter, false);
 
-       raw_spin_lock(&lock->wait_lock);
+       /*
+        * Technically we could use raw_spin_[un]lock_irq() here, but this can
+        * be called in early boot if the cmpxchg() fast path is disabled
+        * (debug, no architecture support). In this case we will acquire the
+        * rtmutex with lock->wait_lock held. But we cannot unconditionally
+        * enable interrupts in that early boot case. So we need to use the
+        * irqsave/restore variants.
+        */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                if (ww_ctx)
                        ww_mutex_account_lock(lock, ww_ctx);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                return 0;
        }
 
@@ -1717,7 +1721,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        /* Remove pending timer: */
        if (unlikely(timeout))
@@ -1733,6 +1737,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  */
 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
+       unsigned long flags;
        int ret;
 
        /*
@@ -1744,10 +1749,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
                return 0;
 
        /*
-        * The mutex has currently no owner. Lock the wait lock and
-        * try to acquire the lock.
+        * The mutex has currently no owner. Lock the wait lock and try to
+        * acquire the lock. We use irqsave here to support early boot calls.
         */
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        ret = try_to_take_rt_mutex(lock, current, NULL);
 
@@ -1757,7 +1762,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        return ret;
 }
@@ -1770,7 +1775,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
                                         struct wake_q_head *wake_q,
                                         struct wake_q_head *wake_sleeper_q)
 {
-       raw_spin_lock(&lock->wait_lock);
+       unsigned long flags;
+
+       /* irqsave required to support early boot calls */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        debug_rt_mutex_unlock(lock);
 
@@ -1809,10 +1817,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
-               if (unlock_rt_mutex_safe(lock) == true)
+               if (unlock_rt_mutex_safe(lock, flags) == true)
                        return false;
                /* Relock the rtmutex and try again */
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irqsave(&lock->wait_lock, flags);
        }
 
        /*
@@ -1823,7 +1831,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         */
        mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        /* check PI boosting */
        return true;
@@ -2135,10 +2143,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 {
        int ret;
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        if (try_to_take_rt_mutex(lock, task, NULL)) {
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return 1;
        }
 
@@ -2161,14 +2169,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
         * PI_REQUEUE_INPROGRESS, so that if the task is waking up
         * it will know that we are in the process of requeuing it.
         */
-       raw_spin_lock_irq(&task->pi_lock);
+       raw_spin_lock(&task->pi_lock);
        if (task->pi_blocked_on) {
-               raw_spin_unlock_irq(&task->pi_lock);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&task->pi_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return -EAGAIN;
        }
        task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-       raw_spin_unlock_irq(&task->pi_lock);
+       raw_spin_unlock(&task->pi_lock);
 #endif
 
        /* We enforce deadlock detection for futexes */
@@ -2188,7 +2196,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
        if (ret && rt_mutex_has_waiters(lock))
                remove_waiter(lock, waiter);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        debug_rt_mutex_print_deadlock(waiter);
 
@@ -2236,7 +2244,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 {
        int ret;
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        set_current_state(TASK_INTERRUPTIBLE);
 
@@ -2252,7 +2260,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
 }
-- 
1.9.1