rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/blob - rt-patches/0171-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
Fix memguard and related syscalls
[hercules2020/nv-tegra/linux-4.4.git] / rt-patches / 0171-workqueue-Use-local-irq-lock-instead-of-irq-disable-.patch
1 From b52768fa28b9ce44c7177d87e010771c6f37b298 Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Sun, 17 Jul 2011 21:42:26 +0200
4 Subject: [PATCH 171/366] workqueue: Use local irq lock instead of irq disable
5  regions
6
7 Use a local_irq_lock as a replacement for irq-off regions. We keep the
8 semantics of irq-off with regard to the pool->lock and remain preemptible.
9
10 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
11 ---
12  kernel/workqueue.c | 31 +++++++++++++++++--------------
13  1 file changed, 17 insertions(+), 14 deletions(-)
14
15 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
16 index 76b85df..efc8cbe 100644
17 --- a/kernel/workqueue.c
18 +++ b/kernel/workqueue.c
19 @@ -48,6 +48,7 @@
20  #include <linux/nodemask.h>
21  #include <linux/moduleparam.h>
22  #include <linux/uaccess.h>
23 +#include <linux/locallock.h>
24  
25  #include "workqueue_internal.h"
26  
27 @@ -331,6 +332,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
28  struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
29  EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
30  
31 +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
32 +
33  static int worker_thread(void *__worker);
34  static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
35  
36 @@ -1111,9 +1114,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
37                  * As both pwqs and pools are RCU protected, the
38                  * following lock operations are safe.
39                  */
40 -               spin_lock_irq(&pwq->pool->lock);
41 +               local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
42                 put_pwq(pwq);
43 -               spin_unlock_irq(&pwq->pool->lock);
44 +               local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
45         }
46  }
47  
48 @@ -1215,7 +1218,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
49         struct worker_pool *pool;
50         struct pool_workqueue *pwq;
51  
52 -       local_irq_save(*flags);
53 +       local_lock_irqsave(pendingb_lock, *flags);
54  
55         /* try to steal the timer if it exists */
56         if (is_dwork) {
57 @@ -1279,7 +1282,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
58         spin_unlock(&pool->lock);
59  fail:
60         rcu_read_unlock();
61 -       local_irq_restore(*flags);
62 +       local_unlock_irqrestore(pendingb_lock, *flags);
63         if (work_is_canceling(work))
64                 return -ENOENT;
65         cpu_relax();
66 @@ -1351,7 +1354,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
67          * queued or lose PENDING.  Grabbing PENDING and queueing should
68          * happen with IRQ disabled.
69          */
70 -       WARN_ON_ONCE(!irqs_disabled());
71 +       WARN_ON_ONCE_NONRT(!irqs_disabled());
72  
73         debug_work_activate(work);
74  
75 @@ -1456,14 +1459,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
76         bool ret = false;
77         unsigned long flags;
78  
79 -       local_irq_save(flags);
80 +       local_lock_irqsave(pendingb_lock,flags);
81  
82         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
83                 __queue_work(cpu, wq, work);
84                 ret = true;
85         }
86  
87 -       local_irq_restore(flags);
88 +       local_unlock_irqrestore(pendingb_lock, flags);
89         return ret;
90  }
91  EXPORT_SYMBOL(queue_work_on);
92 @@ -1530,14 +1533,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
93         unsigned long flags;
94  
95         /* read the comment in __queue_work() */
96 -       local_irq_save(flags);
97 +       local_lock_irqsave(pendingb_lock, flags);
98  
99         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
100                 __queue_delayed_work(cpu, wq, dwork, delay);
101                 ret = true;
102         }
103  
104 -       local_irq_restore(flags);
105 +       local_unlock_irqrestore(pendingb_lock, flags);
106         return ret;
107  }
108  EXPORT_SYMBOL(queue_delayed_work_on);
109 @@ -1572,7 +1575,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
110  
111         if (likely(ret >= 0)) {
112                 __queue_delayed_work(cpu, wq, dwork, delay);
113 -               local_irq_restore(flags);
114 +               local_unlock_irqrestore(pendingb_lock, flags);
115         }
116  
117         /* -ENOENT from try_to_grab_pending() becomes %true */
118 @@ -2846,7 +2849,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
119  
120         /* tell other tasks trying to grab @work to back off */
121         mark_work_canceling(work);
122 -       local_irq_restore(flags);
123 +       local_unlock_irqrestore(pendingb_lock, flags);
124  
125         flush_work(work);
126         clear_work_data(work);
127 @@ -2901,10 +2904,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
128   */
129  bool flush_delayed_work(struct delayed_work *dwork)
130  {
131 -       local_irq_disable();
132 +       local_lock_irq(pendingb_lock);
133         if (del_timer_sync(&dwork->timer))
134                 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
135 -       local_irq_enable();
136 +       local_unlock_irq(pendingb_lock);
137         return flush_work(&dwork->work);
138  }
139  EXPORT_SYMBOL(flush_delayed_work);
140 @@ -2939,7 +2942,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
141  
142         set_work_pool_and_clear_pending(&dwork->work,
143                                         get_work_pool_id(&dwork->work));
144 -       local_irq_restore(flags);
145 +       local_unlock_irqrestore(pendingb_lock, flags);
146         return ret;
147  }
148  EXPORT_SYMBOL(cancel_delayed_work);
149 -- 
150 1.9.1
151