1 From b52768fa28b9ce44c7177d87e010771c6f37b298 Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Sun, 17 Jul 2011 21:42:26 +0200
4 Subject: [PATCH 171/366] workqueue: Use local irq lock instead of irq disable
7 Use a local_irq_lock as a replacement for irq off regions. We keep the
8 semantic of irq-off with regard to the pool->lock and remain preemptible.
10 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
12 kernel/workqueue.c | 31 +++++++++++++++++--------------
13 1 file changed, 17 insertions(+), 14 deletions(-)
15 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
16 index 76b85df..efc8cbe 100644
17 --- a/kernel/workqueue.c
18 +++ b/kernel/workqueue.c
20 #include <linux/nodemask.h>
21 #include <linux/moduleparam.h>
22 #include <linux/uaccess.h>
23 +#include <linux/locallock.h>
25 #include "workqueue_internal.h"
27 @@ -331,6 +332,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
28 struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
29 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
31 +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
33 static int worker_thread(void *__worker);
34 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
36 @@ -1111,9 +1114,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
37 * As both pwqs and pools are RCU protected, the
38 * following lock operations are safe.
40 - spin_lock_irq(&pwq->pool->lock);
41 + local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
43 - spin_unlock_irq(&pwq->pool->lock);
44 + local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
48 @@ -1215,7 +1218,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
49 struct worker_pool *pool;
50 struct pool_workqueue *pwq;
52 - local_irq_save(*flags);
53 + local_lock_irqsave(pendingb_lock, *flags);
55 /* try to steal the timer if it exists */
57 @@ -1279,7 +1282,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
58 spin_unlock(&pool->lock);
61 - local_irq_restore(*flags);
62 + local_unlock_irqrestore(pendingb_lock, *flags);
63 if (work_is_canceling(work))
66 @@ -1351,7 +1354,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
67 * queued or lose PENDING. Grabbing PENDING and queueing should
68 * happen with IRQ disabled.
70 - WARN_ON_ONCE(!irqs_disabled());
71 + WARN_ON_ONCE_NONRT(!irqs_disabled());
73 debug_work_activate(work);
75 @@ -1456,14 +1459,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
79 - local_irq_save(flags);
80 + local_lock_irqsave(pendingb_lock, flags);
82 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
83 __queue_work(cpu, wq, work);
87 - local_irq_restore(flags);
88 + local_unlock_irqrestore(pendingb_lock, flags);
91 EXPORT_SYMBOL(queue_work_on);
92 @@ -1530,14 +1533,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
95 /* read the comment in __queue_work() */
96 - local_irq_save(flags);
97 + local_lock_irqsave(pendingb_lock, flags);
99 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
100 __queue_delayed_work(cpu, wq, dwork, delay);
104 - local_irq_restore(flags);
105 + local_unlock_irqrestore(pendingb_lock, flags);
108 EXPORT_SYMBOL(queue_delayed_work_on);
109 @@ -1572,7 +1575,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
111 if (likely(ret >= 0)) {
112 __queue_delayed_work(cpu, wq, dwork, delay);
113 - local_irq_restore(flags);
114 + local_unlock_irqrestore(pendingb_lock, flags);
117 /* -ENOENT from try_to_grab_pending() becomes %true */
118 @@ -2846,7 +2849,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
120 /* tell other tasks trying to grab @work to back off */
121 mark_work_canceling(work);
122 - local_irq_restore(flags);
123 + local_unlock_irqrestore(pendingb_lock, flags);
126 clear_work_data(work);
127 @@ -2901,10 +2904,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
129 bool flush_delayed_work(struct delayed_work *dwork)
131 - local_irq_disable();
132 + local_lock_irq(pendingb_lock);
133 if (del_timer_sync(&dwork->timer))
134 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
135 - local_irq_enable();
136 + local_unlock_irq(pendingb_lock);
137 return flush_work(&dwork->work);
139 EXPORT_SYMBOL(flush_delayed_work);
140 @@ -2939,7 +2942,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
142 set_work_pool_and_clear_pending(&dwork->work,
143 get_work_pool_id(&dwork->work));
144 - local_irq_restore(flags);
145 + local_unlock_irqrestore(pendingb_lock, flags);
148 EXPORT_SYMBOL(cancel_delayed_work);