rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
rwlock: disable migration before taking a lock
author: Steven Rostedt <rostedt@goodmis.org>
Wed, 30 Apr 2014 00:13:08 +0000 (20:13 -0400)
committer: Michal Sojka <sojka@merica.cz>
Sun, 13 Sep 2015 07:47:23 +0000 (09:47 +0200)
If there are no complaints about it, I'm going to add this to the 3.12-rt
stable tree. As without it, it fails horribly with the cpu hotplug
stress test, and I won't release a stable kernel that does that.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/locking/rt.c

index 906322a61ffd11fc39b2a1d8a4f4fe62b41c0e43..2e7efaf1d8b212c07c9f747f6e3d0c647fcf4b3b 100644 (file)
@@ -180,12 +180,14 @@ EXPORT_SYMBOL(_mutex_unlock);
  */
 int __lockfunc rt_write_trylock(rwlock_t *rwlock)
 {
-       int ret = rt_mutex_trylock(&rwlock->lock);
+       int ret;
 
-       if (ret) {
+       migrate_disable();
+       ret = rt_mutex_trylock(&rwlock->lock);
+       if (ret)
                rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-               migrate_disable();
-       }
+       else
+               migrate_enable();
 
        return ret;
 }
@@ -212,9 +214,10 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
         * write locked.
         */
        if (rt_mutex_owner(lock) != current) {
+               migrate_disable();
                ret = rt_mutex_trylock(lock);
-               if (ret)
-                       migrate_disable();
+               if (!ret)
+                       migrate_enable();
 
        } else if (!rwlock->read_depth) {
                ret = 0;
@@ -247,8 +250,8 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
         * recursive read locks succeed when current owns the lock
         */
        if (rt_mutex_owner(lock) != current) {
-               __rt_spin_lock(lock);
                migrate_disable();
+               __rt_spin_lock(lock);
        }
        rwlock->read_depth++;
 }