/*
 * Simple waitqueues without fancy flags and callbacks
 *
 * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
 *
 * Based on kernel/wait.c
 *
 * For licencing details see kernel-base/COPYING
 */
10 #include <linux/init.h>
11 #include <linux/export.h>
12 #include <linux/sched.h>
13 #include <linux/wait-simple.h>
15 /* Adds w to head->list. Must be called with head->lock locked. */
16 static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
18 list_add(&w->node, &head->list);
19 /* We can't let the condition leak before the setting of head */
23 /* Removes w from head->list. Must be called with head->lock locked. */
24 static inline void __swait_dequeue(struct swaiter *w)
26 list_del_init(&w->node);
29 /* Check whether a head has waiters enqueued */
30 static inline bool swait_head_has_waiters(struct swait_head *h)
32 /* Make sure the condition is visible before checking list_empty() */
34 return !list_empty(&h->list);
37 void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
39 raw_spin_lock_init(&head->lock);
40 lockdep_set_class(&head->lock, key);
41 INIT_LIST_HEAD(&head->list);
43 EXPORT_SYMBOL(__init_swait_head);
45 void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
48 if (list_empty(&w->node))
49 __swait_enqueue(head, w);
52 void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
56 raw_spin_lock_irqsave(&head->lock, flags);
57 swait_prepare_locked(head, w);
58 __set_current_state(state);
59 raw_spin_unlock_irqrestore(&head->lock, flags);
61 EXPORT_SYMBOL(swait_prepare);
63 void swait_finish_locked(struct swait_head *head, struct swaiter *w)
65 __set_current_state(TASK_RUNNING);
70 void swait_finish(struct swait_head *head, struct swaiter *w)
74 __set_current_state(TASK_RUNNING);
76 raw_spin_lock_irqsave(&head->lock, flags);
78 raw_spin_unlock_irqrestore(&head->lock, flags);
81 EXPORT_SYMBOL(swait_finish);
84 __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
86 struct swaiter *curr, *next;
89 list_for_each_entry_safe(curr, next, &head->list, node) {
90 if (wake_up_state(curr->task, state)) {
91 __swait_dequeue(curr);
93 * The waiting task can free the waiter as
94 * soon as curr->task = NULL is written,
95 * without taking any locks. A memory barrier
96 * is required here to prevent the following
97 * store to curr->task from getting ahead of
98 * the dequeue operation.
110 __swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
115 if (!swait_head_has_waiters(head))
118 raw_spin_lock_irqsave(&head->lock, flags);
119 woken = __swait_wake_locked(head, state, num);
120 raw_spin_unlock_irqrestore(&head->lock, flags);
123 EXPORT_SYMBOL(__swait_wake);