From 1e3f440359c24cda88c9bc01c192afdb588c8f0b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Jan 2016 11:25:38 +0100
Subject: [PATCH 286/366] rtmutex: Make wait_lock irq safe
Sasha reported a lockdep splat about a potential deadlock between RCU boosting
rtmutex and the posix timer it_lock.
CPU0					CPU1

rtmutex_lock(&rcu->rt_mutex)
  spin_lock(&rcu->rt_mutex.wait_lock)
					local_irq_disable()
					spin_lock(&timer->it_lock)
					spin_lock(&rcu->mutex.wait_lock)
--> Interrupt
    spin_lock(&timer->it_lock)
This is caused by the following code sequence on CPU1

     rcu_read_lock()
     x = lookup();
     if (x)
     	spin_lock_irqsave(&x->it_lock);
     rcu_read_unlock();
     return x;
We could fix that in the posix timer code by keeping rcu read locked across
the spinlocked and irq disabled section, but the above sequence is common and
there is no reason not to support it.
Taking rt_mutex.wait_lock irq safe prevents the deadlock.
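
The conversion follows the usual pattern for this: the outermost
wait_lock acquisition saves and disables interrupts, and the pi_lock
sites nested inside it drop their own irq handling. A minimal sketch of
the resulting pattern (illustrative only, condensed from the hunks
below; lock and task stand in for the rtmutex and a waiter task):

     unsigned long flags;

     /* Outermost lock now disables (and later restores) interrupts */
     raw_spin_lock_irqsave(&lock->wait_lock, flags);

     /* Nested pi_lock no longer needs to touch the interrupt state */
     raw_spin_lock(&task->pi_lock);
     raw_spin_unlock(&task->pi_lock);

     raw_spin_unlock_irqrestore(&lock->wait_lock, flags);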
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/futex.c           |  18 ++---
 kernel/locking/rtmutex.c | 178 +++++++++++++++++++++++++----------------------
 2 files changed, 102 insertions(+), 94 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 67cd1f9..ad38af0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1226,7 +1226,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
 	if (pi_state->owner != current)
 		return -EINVAL;
 
-	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
+	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
@@ -1262,22 +1262,22 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
 	if (ret) {
-		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 		return ret;
 	}
 
-	raw_spin_lock_irq(&pi_state->owner->pi_lock);
+	raw_spin_lock(&pi_state->owner->pi_lock);
 	WARN_ON(list_empty(&pi_state->list));
 	list_del_init(&pi_state->list);
-	raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+	raw_spin_unlock(&pi_state->owner->pi_lock);
 
-	raw_spin_lock_irq(&new_owner->pi_lock);
+	raw_spin_lock(&new_owner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
 	list_add(&pi_state->list, &new_owner->pi_state_list);
 	pi_state->owner = new_owner;
-	raw_spin_unlock_irq(&new_owner->pi_lock);
+	raw_spin_unlock(&new_owner->pi_lock);
 
-	raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
 	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
@@ -2154,11 +2154,11 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 		 * we returned due to timeout or signal without taking the
 		 * rt_mutex. Too late.
 		 */
-		raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
+		raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
 		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
 		if (!owner)
 			owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-		raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
+		raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
 		ret = fixup_pi_state_owner(uaddr, q, owner);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e1ddae3..a6f5326 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -111,13 +111,14 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+					unsigned long flags)
 	__releases(lock->wait_lock)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 
 	clear_rt_mutex_waiters(lock);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	/*
 	 * If a new waiter comes in between the unlock and the cmpxchg
 	 * we have two situations:
@@ -159,11 +160,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+					unsigned long flags)
 	__releases(lock->wait_lock)
 {
 	lock->owner = NULL;
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	return true;
 }
@@ -454,7 +456,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	int ret = 0, depth = 0;
 	struct rt_mutex *lock;
 	bool detect_deadlock;
-	unsigned long flags;
 
 	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
@@ -497,7 +498,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * [1] Task cannot go away as we did a get_task() before !
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irq(&task->pi_lock);
 
 	/*
 	 * [2] Get the waiter on which @task is blocked on.
@@ -581,7 +582,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	if (!raw_spin_trylock(&lock->wait_lock)) {
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irq(&task->pi_lock);
 		cpu_relax();
 		goto retry;
 	}
@@ -612,7 +613,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		 * No requeue[7] here. Just release @task [8]
 		 */
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&task->pi_lock);
 		put_task_struct(task);
@@ -620,14 +621,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * If there is no owner of the lock, end of chain.
 	 */
 	if (!rt_mutex_owner(lock)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
 
 	/* [10] Grab the next task, i.e. owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 
 	/*
 	 * No requeue [11] here. We just do deadlock detection.
@@ -642,8 +643,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	top_waiter = rt_mutex_top_waiter(lock);
 
 	/* [13] Drop locks */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&task->pi_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	/* If owner is not blocked, end of chain. */
@@ -664,7 +665,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	rt_mutex_enqueue(lock, waiter);
 
 	/* [8] Release the task */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 	put_task_struct(task);
@@ -685,14 +686,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		lock_top_waiter = rt_mutex_top_waiter(lock);
 		if (prerequeue_top_waiter != lock_top_waiter)
 			rt_mutex_wake_waiter(lock_top_waiter);
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
 
 	/* [10] Grab the next task, i.e. the owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 
 	/* [11] requeue the pi waiters if necessary */
 	if (waiter == rt_mutex_top_waiter(lock)) {
@@ -746,8 +747,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	top_waiter = rt_mutex_top_waiter(lock);
 
 	/* [13] Drop the locks */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&task->pi_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	/*
 	 * Make the actual exit decisions [12], based on the stored
@@ -770,7 +771,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 out_unlock_pi:
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
 	put_task_struct(task);
@@ -799,7 +800,7 @@ static inline int lock_is_stealable(struct task_struct *task,
 /*
  * Try to take an rt-mutex
  *
- * Must be called with lock->wait_lock held.
+ * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
@@ -810,8 +811,6 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 				  struct task_struct *task,
 				  struct rt_mutex_waiter *waiter, int mode)
 {
-	unsigned long flags;
-
 	/*
 	 * Before testing whether we can acquire @lock, we set the
 	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
@@ -894,7 +893,7 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 	 * case, but conditionals are more expensive than a redundant
 	 * store.
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 	task->pi_blocked_on = NULL;
 	/*
 	 * Finish the lock acquisition. @task is the new owner. If
@@ -903,7 +902,7 @@ static int __try_to_take_rt_mutex(struct rt_mutex *lock,
 	 */
 	if (rt_mutex_has_waiters(lock))
 		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 
 takeit:
 	/* We got the lock. */
@@ -979,9 +978,6 @@ static int adaptive_wait(struct rt_mutex *lock,
-# define pi_lock(lock)		raw_spin_lock_irq(lock)
-# define pi_unlock(lock)	raw_spin_unlock_irq(lock)
-
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
 				   struct task_struct *task,
@@ -997,14 +993,15 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 {
 	struct task_struct *lock_owner, *self = current;
 	struct rt_mutex_waiter waiter, *top_waiter;
+	unsigned long flags;
 	int ret;
 
 	rt_mutex_init_waiter(&waiter, true);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 		return;
 	}
@@ -1016,10 +1013,10 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 	 * as well. We are serialized via pi_lock against wakeups. See
 	 * try_to_wake_up().
 	 */
-	pi_lock(&self->pi_lock);
+	raw_spin_lock(&self->pi_lock);
 	self->saved_state = self->state;
 	__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-	pi_unlock(&self->pi_lock);
+	raw_spin_unlock(&self->pi_lock);
 
 	ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
@@ -1032,18 +1029,18 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 		top_waiter = rt_mutex_top_waiter(lock);
 		lock_owner = rt_mutex_owner(lock);
 
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 		debug_rt_mutex_print_deadlock(&waiter);
 
 		if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
 			schedule();
 
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
-		pi_lock(&self->pi_lock);
+		raw_spin_lock(&self->pi_lock);
 		__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-		pi_unlock(&self->pi_lock);
+		raw_spin_unlock(&self->pi_lock);
@@ -1053,10 +1050,10 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 	 * happened while we were blocked. Clear saved_state so
 	 * try_to_wakeup() does not get confused.
 	 */
-	pi_lock(&self->pi_lock);
+	raw_spin_lock(&self->pi_lock);
 	__set_current_state_no_track(self->saved_state);
 	self->saved_state = TASK_RUNNING;
-	pi_unlock(&self->pi_lock);
+	raw_spin_unlock(&self->pi_lock);
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -1067,7 +1064,7 @@ static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 	BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
 	BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	debug_rt_mutex_free_waiter(&waiter);
@@ -1080,10 +1077,11 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
+	unsigned long flags;
 	WAKE_Q(wake_q);
 	WAKE_Q(wake_sleeper_q);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	debug_rt_mutex_unlock(lock);
@@ -1091,13 +1089,13 @@ static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 	if (!rt_mutex_has_waiters(lock)) {
 		lock->owner = NULL;
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 		return;
 	}
 
 	mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	wake_up_q(&wake_q);
 	wake_up_q_sleeper(&wake_sleeper_q);
@@ -1273,7 +1271,7 @@ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 * Prepare waiter and propagate pi chain
 *
- * This must be called with lock->wait_lock held.
+ * This must be called with lock->wait_lock held and interrupts disabled
 */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
@@ -1284,7 +1282,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	struct rt_mutex_waiter *top_waiter = waiter;
 	struct rt_mutex *next_lock;
 	int chain_walk = 0, res;
-	unsigned long flags;
 
 	/*
 	 * Early deadlock detection. We really don't want the task to
@@ -1298,7 +1295,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	if (owner == task)
 		return -EDEADLK;
 
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 
 	/*
 	 * In the case of futex requeue PI, this will be a proxy
@@ -1310,7 +1307,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	 * the task if PI_WAKEUP_INPROGRESS is set.
 	 */
 	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&task->pi_lock);
 		return -EAGAIN;
 	}
@@ -1328,12 +1325,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	rt_mutex_enqueue(lock, waiter);
 	task->pi_blocked_on = waiter;
 
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 
 	if (!owner)
 		return 0;
 
-	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	raw_spin_lock(&owner->pi_lock);
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		rt_mutex_dequeue_pi(owner, top_waiter);
 		rt_mutex_enqueue_pi(owner, waiter);
@@ -1348,7 +1345,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	/* Store the lock on which owner is blocked or NULL */
 	next_lock = task_blocked_on_lock(owner);
 
-	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	raw_spin_unlock(&owner->pi_lock);
 	/*
 	 * Even if full deadlock detection is on, if the owner is not
 	 * blocked itself, we can avoid finding this out in the chain
@@ -1364,12 +1361,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	get_task_struct(owner);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
@@ -1378,16 +1375,15 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up.
 *
- * Called with lock->wait_lock held.
+ * Called with lock->wait_lock held and interrupts disabled.
 */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 				    struct wake_q_head *wake_sleeper_q,
 				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock(&current->pi_lock);
 
 	waiter = rt_mutex_top_waiter(lock);
@@ -1409,7 +1405,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
-	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock(&current->pi_lock);
 
 	if (waiter->savestate)
 		wake_q_add(wake_sleeper_q, waiter->task);
@@ -1420,7 +1416,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 /*
 * Remove a waiter from a lock and give up
 *
- * Must be called with lock->wait_lock held and
+ * Must be called with lock->wait_lock held and interrupts disabled. I must
 * have just failed to try_to_take_rt_mutex().
 */
 static void remove_waiter(struct rt_mutex *lock,
@@ -1429,12 +1425,11 @@ static void remove_waiter(struct rt_mutex *lock,
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex *next_lock = NULL;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock(&current->pi_lock);
 	rt_mutex_dequeue(lock, waiter);
 	current->pi_blocked_on = NULL;
-	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock(&current->pi_lock);
 
 	/*
 	 * Only update priority if the waiter was the highest priority
@@ -1443,7 +1438,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	if (!owner || !is_top_waiter)
 		return;
 
-	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	raw_spin_lock(&owner->pi_lock);
 
 	rt_mutex_dequeue_pi(owner, waiter);
@@ -1456,7 +1451,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	if (rt_mutex_real_waiter(owner->pi_blocked_on))
 		next_lock = task_blocked_on_lock(owner);
 
-	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	raw_spin_unlock(&owner->pi_lock);
 
 	/*
 	 * Don't walk the chain, if the owner task is not blocked
@@ -1468,12 +1463,12 @@ static void remove_waiter(struct rt_mutex *lock,
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(owner);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
 				   next_lock, NULL, current);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
@@ -1509,11 +1504,11 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
- * 			 or TASK_UNINTERRUPTIBLE)
+ *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
- * lock->wait_lock must be held by the caller.
+ * Must be called with lock->wait_lock held and interrupts disabled
 */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
@@ -1548,13 +1543,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule();
 
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
@@ -1668,17 +1663,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct ww_acquire_ctx *ww_ctx)
 {
 	struct rt_mutex_waiter waiter;
+	unsigned long flags;
 	int ret = 0;
 
 	rt_mutex_init_waiter(&waiter, false);
 
-	raw_spin_lock(&lock->wait_lock);
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
 		if (ww_ctx)
 			ww_mutex_account_lock(lock, ww_ctx);
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
@@ -1717,7 +1721,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* Remove pending timer: */
 	if (unlikely(timeout))
@@ -1733,6 +1737,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
+	unsigned long flags;
 	int ret;
@@ -1744,10 +1749,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 	/*
-	 * The mutex has currently no owner. Lock the wait lock and
-	 * try to acquire the lock.
+	 * The mutex has currently no owner. Lock the wait lock and try to
+	 * acquire the lock. We use irqsave here to support early boot calls.
 	 */
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	ret = try_to_take_rt_mutex(lock, current, NULL);
@@ -1757,7 +1762,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
@@ -1770,7 +1775,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 					struct wake_q_head *wake_q,
 					struct wake_q_head *wake_sleeper_q)
 {
-	raw_spin_lock(&lock->wait_lock);
+	unsigned long flags;
+
+	/* irqsave required to support early boot calls */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	debug_rt_mutex_unlock(lock);
@@ -1809,10 +1817,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
-		if (unlock_rt_mutex_safe(lock) == true)
+		if (unlock_rt_mutex_safe(lock, flags) == true)
 			return false;
 		/* Relock the rtmutex and try again */
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -1823,7 +1831,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* check PI boosting */
@@ -2135,10 +2143,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	if (try_to_take_rt_mutex(lock, task, NULL)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
@@ -2161,14 +2169,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
 	 * it will know that we are in the process of requeuing it.
 	 */
-	raw_spin_lock_irq(&task->pi_lock);
+	raw_spin_lock(&task->pi_lock);
 	if (task->pi_blocked_on) {
-		raw_spin_unlock_irq(&task->pi_lock);
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock(&task->pi_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return -EAGAIN;
 	}
 	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-	raw_spin_unlock_irq(&task->pi_lock);
+	raw_spin_unlock(&task->pi_lock);
 
 	/* We enforce deadlock detection for futexes */
@@ -2188,7 +2196,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	if (ret && rt_mutex_has_waiters(lock))
 		remove_waiter(lock, waiter);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	debug_rt_mutex_print_deadlock(waiter);
@@ -2236,7 +2244,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -2252,7 +2260,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);