rtmutex: use a trylock for waiter lock in trylock
author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
          Fri, 15 Nov 2013 14:46:50 +0000 (15:46 +0100)
committer Michal Sojka <sojka@merica.cz>
          Sun, 13 Sep 2015 07:47:25 +0000 (09:47 +0200)
Mike Galbraith captured the following:
| >#11 [ffff88017b243e90] _raw_spin_lock at ffffffff815d2596
| >#12 [ffff88017b243e90] rt_mutex_trylock at ffffffff815d15be
| >#13 [ffff88017b243eb0] get_next_timer_interrupt at ffffffff81063b42
| >#14 [ffff88017b243f00] tick_nohz_stop_sched_tick at ffffffff810bd1fd
| >#15 [ffff88017b243f70] tick_nohz_irq_exit at ffffffff810bd7d2
| >#16 [ffff88017b243f90] irq_exit at ffffffff8105b02d
| >#17 [ffff88017b243fb0] reschedule_interrupt at ffffffff815db3dd
| >--- <IRQ stack> ---
| >#18 [ffff88017a2a9bc8] reschedule_interrupt at ffffffff815db3dd
| >    [exception RIP: task_blocks_on_rt_mutex+51]
| >#19 [ffff88017a2a9ce0] rt_spin_lock_slowlock at ffffffff815d183c
| >#20 [ffff88017a2a9da0] lock_timer_base.isra.35 at ffffffff81061cbf
| >#21 [ffff88017a2a9dd0] schedule_timeout at ffffffff815cf1ce
| >#22 [ffff88017a2a9e50] rcu_gp_kthread at ffffffff810f9bbb
| >#23 [ffff88017a2a9ed0] kthread at ffffffff810796d5
| >#24 [ffff88017a2a9f50] ret_from_fork at ffffffff815da04c

lock_timer_base() does a try_lock() which deadlocks on the waiter lock,
not the lock itself.
This patch takes the waiter lock with a trylock so it works from interrupt
context as well. If the fastpath fails and the waiter lock itself is already
held, then the lock itself is most likely taken too.
This patch also adds rt_spin_unlock_after_trylock_in_irq() to keep lockdep
happy: if we managed to take the wait_lock in the first place, we should also
be able to take it in the unlock path.
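
For illustration only (not part of the patch), the caller pattern this
enables looks roughly like the sketch below. It is loosely modelled on the
get_next_timer_interrupt() path from the trace; the function name
next_timer_irq_sketch(), the "now + 1" fallback and the elided wheel scan
are assumptions for the example, while spin_trylock() and the new
rt_spin_unlock_after_trylock_in_irq() are the interfaces actually involved:

#include <linux/spinlock.h>
#include <linux/timer.h>	/* NEXT_TIMER_MAX_DELTA */

/*
 * Sketch: on PREEMPT_RT, base->lock is a sleeping spinlock backed by an
 * rt_mutex, so the trylock ends up in rt_mutex_trylock().  Before this
 * patch the trylock slow path took lock->wait_lock unconditionally; if the
 * interrupted task already held wait_lock (rcu_gp_kthread inside
 * lock_timer_base() in the trace above), the "try" lock spun forever in
 * hard interrupt context.  struct tvec_base is the per-CPU timer base
 * private to kernel/time/timer.c.
 */
static unsigned long next_timer_irq_sketch(struct tvec_base *base,
					   unsigned long now)
{
	unsigned long expires;

	/* Must not block: we may run from irq_exit() on the way to idle. */
	if (!spin_trylock(&base->lock))
		return now + 1;	/* worst case: assume a timer on the next tick */

	expires = now + NEXT_TIMER_MAX_DELTA;
	/* ... scan the timer wheel, lowering 'expires' ... */

	/* The unlock must not sleep on wait_lock either. */
	rt_spin_unlock_after_trylock_in_irq(&base->lock);

	return expires;
}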

Cc: stable-rt@vger.kernel.org
Reported-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/spinlock_rt.h
kernel/locking/rtmutex.c
kernel/time/timer.c

diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index ca08d3b2d9e88f6ddd21544350668799cc7b2315..af1cfd5d77d7914632f39cc90a9e0efa9fdb2069 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -22,6 +22,7 @@ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
 extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
 extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
 extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
 extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
 extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
 extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index f7ab4e2c34fa559c5ddf999c5bf1aaea9c570945..0f4188876c6440f3838a0be133b06e326ad139ed 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1063,10 +1063,8 @@ static void wakeup_next_waiter(struct rt_mutex *lock);
 /*
  * Slow path to release a rt_mutex spin_lock style
  */
-static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
 {
-       raw_spin_lock(&lock->wait_lock);
-
        debug_rt_mutex_unlock(lock);
 
        rt_mutex_deadlock_account_unlock(current);
@@ -1085,6 +1083,23 @@ static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
        rt_mutex_adjust_prio(current);
 }
 
+static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+       raw_spin_lock(&lock->wait_lock);
+       __rt_spin_lock_slowunlock(lock);
+}
+
+static void  noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+       int ret;
+
+       do {
+               ret = raw_spin_trylock(&lock->wait_lock);
+       } while (!ret);
+
+       __rt_spin_lock_slowunlock(lock);
+}
+
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
        rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
@@ -1115,6 +1130,13 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(rt_spin_unlock);
 
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+       /* NOTE: we always pass in '1' for nested, for simplicity */
+       spin_release(&lock->dep_map, 1, _RET_IP_);
+       rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
 void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
 {
        rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
@@ -1728,7 +1750,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
         * The mutex has currently no owner. Lock the wait lock and
         * try to acquire the lock.
         */
-       raw_spin_lock(&lock->wait_lock);
+       if (!raw_spin_trylock(&lock->wait_lock))
+               return 0;
 
        ret = try_to_take_rt_mutex(lock, current, NULL);
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 6e2b00b74e70064cfa07714328a856c8be4a75fb..03a6aacfb478b2afd53b0da88c42c84f11514d05 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1425,7 +1425,7 @@ unsigned long get_next_timer_interrupt(unsigned long now)
                expires = base->next_timer;
        }
 #ifdef CONFIG_PREEMPT_RT_FULL
-       rt_spin_unlock(&base->lock);
+       rt_spin_unlock_after_trylock_in_irq(&base->lock);
 #else
        spin_unlock(&base->lock);
 #endif