From 7532d368d7bbb43b21f60b9f8a4f8880ad85e191 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
Subject: [PATCH 038/366] rt: Add local irq locks

Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable(), so not much changes. For RT this will map to a
spinlock. This makes preemption possible, and the locked "resource"
gets the lockdep annotation it wouldn't have otherwise. The locks are
recursive for owner == current. Also, all lock operations use
migrate_disable(), which ensures that the task is not migrated to
another CPU while the lock is held and the owner is preempted.
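
A minimal usage sketch for illustration (my_lock, my_count and
bump_count are made-up names, not part of this patch); the call site
reads the same on RT and !RT:

	#include <linux/locallock.h>
	#include <linux/percpu.h>

	/* Hypothetical example code, not added by this patch. */
	static DEFINE_LOCAL_IRQ_LOCK(my_lock);
	static DEFINE_PER_CPU(unsigned long, my_count);

	static void bump_count(void)
	{
		unsigned long flags;

		/* !RT: local_irq_save(); RT: per-CPU spinlock + migrate_disable() */
		local_lock_irqsave(my_lock, flags);
		__this_cpu_inc(my_count);
		local_unlock_irqrestore(my_lock, flags);
	}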

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
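
A second sketch (again with made-up names: stats_lock, struct
my_stats, pstats, account_event): get_locked_var()/put_locked_var()
take the local lock and hand back the protected per-CPU variable, so
the lookup and the locking cannot get out of sync:

	struct my_stats { unsigned long events; };	/* hypothetical */

	static DEFINE_LOCAL_IRQ_LOCK(stats_lock);
	static DEFINE_PER_CPU(struct my_stats, pstats);

	static void account_event(void)
	{
		/* RT: local_lock() + this_cpu_ptr(); !RT: get_cpu_var() */
		struct my_stats *s = &get_locked_var(stats_lock, pstats);

		s->events++;
		put_locked_var(stats_lock, pstats);
	}
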
 include/linux/locallock.h | 264 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/percpu.h    |  29 +++++
 2 files changed, 293 insertions(+)
 create mode 100644 include/linux/locallock.h

diff --git a/include/linux/locallock.h b/include/linux/locallock.h
new file mode 100644
index 0000000..58056ed
--- /dev/null
+++ b/include/linux/locallock.h
@@ -0,0 +1,264 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define LL_WARN(cond) WARN_ON(cond)
+#else
+# define LL_WARN(cond) do { } while (0)
+#endif
+
+/*
+ * per cpu lock based substitute for local_irq_*()
+ */
+struct local_irq_lock {
+       spinlock_t              lock;
+       struct task_struct      *owner;
+       int                     nestcnt;
+       unsigned long           flags;
+};
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)                                    \
+       DEFINE_PER_CPU(struct local_irq_lock, lvar) = {                 \
+               .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
+
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)                                   \
+       DECLARE_PER_CPU(struct local_irq_lock, lvar)
+
+#define local_irq_lock_init(lvar)                                      \
+       do {                                                            \
+               int __cpu;                                              \
+               for_each_possible_cpu(__cpu)                            \
+                       spin_lock_init(&per_cpu(lvar, __cpu).lock);     \
+       } while (0)
+
+/*
+ * spin_lock|trylock|unlock_local flavour that does not migrate disable
+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var
+ * already takes care of the migrate_disable/enable
+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
+ */
+# define spin_lock_local(lock)                 spin_lock(lock)
+# define spin_trylock_local(lock)              spin_trylock(lock)
+# define spin_unlock_local(lock)               spin_unlock(lock)
+
+static inline void __local_lock(struct local_irq_lock *lv)
+{
+       if (lv->owner != current) {
+               spin_lock_local(&lv->lock);
+               LL_WARN(lv->owner);
+               LL_WARN(lv->nestcnt);
+               lv->owner = current;
+       }
+       lv->nestcnt++;
+}
+
+#define local_lock(lvar)                                       \
+       do { __local_lock(&get_local_var(lvar)); } while (0)
+
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+       if (lv->owner != current && spin_trylock_local(&lv->lock)) {
+               LL_WARN(lv->owner);
+               LL_WARN(lv->nestcnt);
+               lv->owner = current;
+               lv->nestcnt = 1;
+               return 1;
+       }
+       return 0;
+}
+
+#define local_trylock(lvar)                                            \
+       ({                                                              \
+               int __locked;                                           \
+               __locked = __local_trylock(&get_local_var(lvar));       \
+               if (!__locked)                                          \
+                       put_local_var(lvar);                            \
+               __locked;                                               \
+       })
+
+static inline void __local_unlock(struct local_irq_lock *lv)
+{
+       LL_WARN(lv->nestcnt == 0);
+       LL_WARN(lv->owner != current);
+       if (--lv->nestcnt)
+               return;
+
+       lv->owner = NULL;
+       spin_unlock_local(&lv->lock);
+}
+
+#define local_unlock(lvar)                                     \
+       do {                                                    \
+               __local_unlock(this_cpu_ptr(&lvar));            \
+               put_local_var(lvar);                            \
+       } while (0)
+
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+       spin_lock_irqsave(&lv->lock, lv->flags);
+       LL_WARN(lv->owner);
+       LL_WARN(lv->nestcnt);
+       lv->owner = current;
+       lv->nestcnt = 1;
+}
+
+#define local_lock_irq(lvar)                                           \
+       do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
+#define local_lock_irq_on(lvar, cpu)                                   \
+       do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
+
+static inline void __local_unlock_irq(struct local_irq_lock *lv)
+{
+       LL_WARN(!lv->nestcnt);
+       LL_WARN(lv->owner != current);
+       lv->owner = NULL;
+       lv->nestcnt = 0;
+       spin_unlock_irq(&lv->lock);
+}
+
+#define local_unlock_irq(lvar)                                         \
+       do {                                                            \
+               __local_unlock_irq(this_cpu_ptr(&lvar));                \
+               put_local_var(lvar);                                    \
+       } while (0)
+
+#define local_unlock_irq_on(lvar, cpu)                                 \
+       do {                                                            \
+               __local_unlock_irq(&per_cpu(lvar, cpu));                \
+       } while (0)
+
+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+{
+       if (lv->owner != current) {
+               __local_lock_irq(lv);
+               return 0;
+       } else {
+               lv->nestcnt++;
+               return 1;
+       }
+}
+
+#define local_lock_irqsave(lvar, _flags)                               \
+       do {                                                            \
+               if (__local_lock_irqsave(&get_local_var(lvar)))         \
+                       put_local_var(lvar);                            \
+               _flags = __this_cpu_read(lvar.flags);                   \
+       } while (0)
+
+#define local_lock_irqsave_on(lvar, _flags, cpu)                       \
+       do {                                                            \
+               __local_lock_irqsave(&per_cpu(lvar, cpu));              \
+               _flags = per_cpu(lvar, cpu).flags;                      \
+       } while (0)
+
+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+                                           unsigned long flags)
+{
+       LL_WARN(!lv->nestcnt);
+       LL_WARN(lv->owner != current);
+       if (--lv->nestcnt)
+               return 0;
+
+       lv->owner = NULL;
+       spin_unlock_irqrestore(&lv->lock, lv->flags);
+       return 1;
+}
+
+#define local_unlock_irqrestore(lvar, flags)                           \
+       do {                                                            \
+               if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
+                       put_local_var(lvar);                            \
+       } while (0)
+
+#define local_unlock_irqrestore_on(lvar, flags, cpu)                   \
+       do {                                                            \
+               __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);  \
+       } while (0)
+
+#define local_spin_trylock_irq(lvar, lock)                             \
+       ({                                                              \
+               int __locked;                                           \
+               local_lock_irq(lvar);                                   \
+               __locked = spin_trylock(lock);                          \
+               if (!__locked)                                          \
+                       local_unlock_irq(lvar);                         \
+               __locked;                                               \
+       })
+
+#define local_spin_lock_irq(lvar, lock)                                        \
+       do {                                                            \
+               local_lock_irq(lvar);                                   \
+               spin_lock(lock);                                        \
+       } while (0)
+
+#define local_spin_unlock_irq(lvar, lock)                              \
+       do {                                                            \
+               spin_unlock(lock);                                      \
+               local_unlock_irq(lvar);                                 \
+       } while (0)
+
+#define local_spin_lock_irqsave(lvar, lock, flags)                     \
+       do {                                                            \
+               local_lock_irqsave(lvar, flags);                        \
+               spin_lock(lock);                                        \
+       } while (0)
+
+#define local_spin_unlock_irqrestore(lvar, lock, flags)                        \
+       do {                                                            \
+               spin_unlock(lock);                                      \
+               local_unlock_irqrestore(lvar, flags);                   \
+       } while (0)
+
+#define get_locked_var(lvar, var)                                      \
+       (*({                                                            \
+               local_lock(lvar);                                       \
+               this_cpu_ptr(&var);                                     \
+       }))
+
+#define put_locked_var(lvar, var)      local_unlock(lvar);
+
+#define local_lock_cpu(lvar)                                           \
+       ({                                                              \
+               local_lock(lvar);                                       \
+               smp_processor_id();                                     \
+       })
+
+#define local_unlock_cpu(lvar)                 local_unlock(lvar)
+
+#else /* PREEMPT_RT_BASE */
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)            __typeof__(const int) lvar
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)           extern __typeof__(const int) lvar
+
+static inline void local_irq_lock_init(int lvar) { }
+
+#define local_lock(lvar)                       preempt_disable()
+#define local_unlock(lvar)                     preempt_enable()
+#define local_lock_irq(lvar)                   local_irq_disable()
+#define local_unlock_irq(lvar)                 local_irq_enable()
+#define local_lock_irqsave(lvar, flags)                local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags)   local_irq_restore(flags)
+
+#define local_spin_trylock_irq(lvar, lock)     spin_trylock_irq(lock)
+#define local_spin_lock_irq(lvar, lock)                spin_lock_irq(lock)
+#define local_spin_unlock_irq(lvar, lock)      spin_unlock_irq(lock)
+#define local_spin_lock_irqsave(lvar, lock, flags)     \
+       spin_lock_irqsave(lock, flags)
+#define local_spin_unlock_irqrestore(lvar, lock, flags)        \
+       spin_unlock_irqrestore(lock, flags)
+
+#define get_locked_var(lvar, var)              get_cpu_var(var)
+#define put_locked_var(lvar, var)              put_cpu_var(var)
+
+#define local_lock_cpu(lvar)                   get_cpu()
+#define local_unlock_cpu(lvar)                 put_cpu()
+
+#endif
+
+#endif
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index caebf2a..53a60a5 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -24,6 +24,35 @@
         PERCPU_MODULE_RESERVE)
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+#define get_local_var(var) (*({                \
+              migrate_disable();       \
+              this_cpu_ptr(&var);      }))
+
+#define put_local_var(var) do {        \
+       (void)&(var);           \
+       migrate_enable();       \
+} while (0)
+
+# define get_local_ptr(var) ({         \
+               migrate_disable();      \
+               this_cpu_ptr(var);      })
+
+# define put_local_ptr(var) do {       \
+       (void)(var);                    \
+       migrate_enable();               \
+} while (0)
+
+#else
+
+#define get_local_var(var)     get_cpu_var(var)
+#define put_local_var(var)     put_cpu_var(var)
+#define get_local_ptr(var)     get_cpu_ptr(var)
+#define put_local_ptr(var)     put_cpu_ptr(var)
+
+#endif
+
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE             PFN_ALIGN(32 << 10)
 
-- 
1.9.1
