rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
completion: Use simple wait queues
Author: Thomas Gleixner <tglx@linutronix.de>
Fri, 11 Jan 2013 10:23:51 +0000 (11:23 +0100)
Committer: Michal Sojka <sojka@merica.cz>
Sun, 13 Sep 2015 07:47:46 +0000 (09:47 +0200)
Completions have no long-lasting callbacks and therefore do not need
the complex waitqueue variant. Use simple waitqueues which reduces the
contention on the waitqueue lock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/completion.h
include/linux/uprobes.h
kernel/sched/completion.c
kernel/sched/core.c

index 5d5aaae3af433ff62b6e03f107c7dfe88a06b522..3fe8d14c98c06bc6dd1b0e26c8278839bd30dca5 100644 (file)
@@ -7,8 +7,7 @@
  * Atomic wait-for-completion handler data structures.
  * See kernel/sched/completion.c for details.
  */
-
-#include <linux/wait.h>
+#include <linux/wait-simple.h>
 
 /*
  * struct completion - structure used to maintain state for a "completion"
  */
 struct completion {
        unsigned int done;
-       wait_queue_head_t wait;
+       struct swait_head wait;
 };
 
 #define COMPLETION_INITIALIZER(work) \
-       { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+       { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
 
 #define COMPLETION_INITIALIZER_ONSTACK(work) \
        ({ init_completion(&work); work; })
@@ -73,7 +72,7 @@ struct completion {
 static inline void init_completion(struct completion *x)
 {
        x->done = 0;
-       init_waitqueue_head(&x->wait);
+       init_swait_head(&x->wait);
 }
 
 /**
index 60beb5dc7977b78c0badc92b17932a112919bc80..f5a644c649b4995430000c30a95a3cc69aa36b0c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/errno.h>
 #include <linux/rbtree.h>
 #include <linux/types.h>
+#include <linux/wait.h>
 
 struct vm_area_struct;
 struct mm_struct;
index 8d0f35debf35657689908a4b37df7230ba7d6710..45ebcffd9feb08e7d2b1eabb5d661a31e1c120fd 100644 (file)
@@ -30,10 +30,10 @@ void complete(struct completion *x)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
+       raw_spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
-       __wake_up_locked(&x->wait, TASK_NORMAL, 1);
-       spin_unlock_irqrestore(&x->wait.lock, flags);
+       __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
+       raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
 
@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
+       raw_spin_lock_irqsave(&x->wait.lock, flags);
        x->done += UINT_MAX/2;
-       __wake_up_locked(&x->wait, TASK_NORMAL, 0);
-       spin_unlock_irqrestore(&x->wait.lock, flags);
+       __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
+       raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
 
@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
                   long (*action)(long), long timeout, int state)
 {
        if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
+               DEFINE_SWAITER(wait);
 
-               __add_wait_queue_tail_exclusive(&x->wait, &wait);
+               swait_prepare_locked(&x->wait, &wait);
                do {
                        if (signal_pending_state(state, current)) {
                                timeout = -ERESTARTSYS;
                                break;
                        }
                        __set_current_state(state);
-                       spin_unlock_irq(&x->wait.lock);
+                       raw_spin_unlock_irq(&x->wait.lock);
                        timeout = action(timeout);
-                       spin_lock_irq(&x->wait.lock);
+                       raw_spin_lock_irq(&x->wait.lock);
                } while (!x->done && timeout);
-               __remove_wait_queue(&x->wait, &wait);
+               swait_finish_locked(&x->wait, &wait);
                if (!x->done)
                        return timeout;
        }
@@ -89,9 +89,9 @@ __wait_for_common(struct completion *x,
 {
        might_sleep();
 
-       spin_lock_irq(&x->wait.lock);
+       raw_spin_lock_irq(&x->wait.lock);
        timeout = do_wait_for_common(x, action, timeout, state);
-       spin_unlock_irq(&x->wait.lock);
+       raw_spin_unlock_irq(&x->wait.lock);
        return timeout;
 }
 
@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x)
        if (!READ_ONCE(x->done))
                return 0;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
+       raw_spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
        else
                x->done--;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
+       raw_spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(try_wait_for_completion);
@@ -311,7 +311,7 @@ bool completion_done(struct completion *x)
         * after it's acquired the lock.
         */
        smp_rmb();
-       spin_unlock_wait(&x->wait.lock);
+       raw_spin_unlock_wait(&x->wait.lock);
        return true;
 }
 EXPORT_SYMBOL(completion_done);
index 8681d058d50277fcd747e3e8c59b3f5ef41bf9fb..006dfb59884caf2280431a8dc5996b9a028e0c2d 100644 (file)
@@ -2813,7 +2813,10 @@ void migrate_disable(void)
        }
 
 #ifdef CONFIG_SCHED_DEBUG
-       WARN_ON_ONCE(p->migrate_disable_atomic);
+       if (unlikely(p->migrate_disable_atomic)) {
+               tracing_off();
+               WARN_ON_ONCE(1);
+       }
 #endif
 
        if (p->migrate_disable) {
@@ -2844,7 +2847,10 @@ void migrate_enable(void)
        }
 
 #ifdef CONFIG_SCHED_DEBUG
-       WARN_ON_ONCE(p->migrate_disable_atomic);
+       if (unlikely(p->migrate_disable_atomic)) {
+               tracing_off();
+               WARN_ON_ONCE(1);
+       }
 #endif
        WARN_ON_ONCE(p->migrate_disable <= 0);