From 7532d368d7bbb43b21f60b9f8a4f8880ad85e191 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 20 Jun 2011 09:03:47 +0200
Subject: [PATCH 038/366] rt: Add local irq locks

Introduce locallock. For !RT this maps to preempt_disable()/
local_irq_disable() so there is not much that changes. For RT this will
map to a spinlock. This makes preemption possible and the locked
"resource" gets the lockdep annotation it wouldn't have otherwise. The
locks are recursive for owner == current. Also, all lock operations use
migrate_disable(), which ensures that the task is not migrated to
another CPU while the lock is held and the owner is preempted.
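
A minimal usage sketch (the lock and counter names below are made up
for illustration and are not part of this patch):

	DEFINE_LOCAL_IRQ_LOCK(example_lock);
	static DEFINE_PER_CPU(int, example_count);

	static void example_inc(void)
	{
		unsigned long flags;

		/* RT: per-CPU spinlock + migrate_disable(),
		   !RT: local_irq_save() */
		local_lock_irqsave(example_lock, flags);
		this_cpu_inc(example_count);
		local_unlock_irqrestore(example_lock, flags);
	}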

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/locallock.h | 264 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/percpu.h    |  29 +++++
 2 files changed, 293 insertions(+)
 create mode 100644 include/linux/locallock.h

diff --git a/include/linux/locallock.h b/include/linux/locallock.h
new file mode 100644
index 0000000..58056ed
--- /dev/null
+++ b/include/linux/locallock.h
@@ -0,0 +1,264 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define LL_WARN(cond)	WARN_ON(cond)
+#else
+# define LL_WARN(cond)	do { } while (0)
+#endif
+
+/*
+ * per cpu lock based substitute for local_irq_*()
+ */
+struct local_irq_lock {
+	spinlock_t		lock;
+	struct task_struct	*owner;
+	int			nestcnt;
+	unsigned long		flags;
+};
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)					\
+	DEFINE_PER_CPU(struct local_irq_lock, lvar) = {			\
+		.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
+
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)					\
+	DECLARE_PER_CPU(struct local_irq_lock, lvar)
+
+#define local_irq_lock_init(lvar)					\
+	do {								\
+		int __cpu;						\
+		for_each_possible_cpu(__cpu)				\
+			spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
+	} while (0)
+
+/*
+ * spin_lock|trylock|unlock_local flavour that does not migrate disable
+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var
+ * already takes care of the migrate_disable/enable
+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
+ */
+# define spin_lock_local(lock)		spin_lock(lock)
+# define spin_trylock_local(lock)	spin_trylock(lock)
+# define spin_unlock_local(lock)	spin_unlock(lock)
+
+static inline void __local_lock(struct local_irq_lock *lv)
+{
+	if (lv->owner != current) {
+		spin_lock_local(&lv->lock);
+		LL_WARN(lv->owner);
+		LL_WARN(lv->nestcnt);
+		lv->owner = current;
+	}
+	lv->nestcnt++;
+}
+
+#define local_lock(lvar)					\
+	do { __local_lock(&get_local_var(lvar)); } while (0)
+
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
+		LL_WARN(lv->owner);
+		LL_WARN(lv->nestcnt);
+		lv->owner = current;
+		lv->nestcnt = 1;
+		return 1;
+	}
+	return 0;
+}
+
+#define local_trylock(lvar)						\
+	({								\
+		int __locked;						\
+		__locked = __local_trylock(&get_local_var(lvar));	\
+		if (!__locked)						\
+			put_local_var(lvar);				\
+		__locked;						\
+	})
+
+static inline void __local_unlock(struct local_irq_lock *lv)
+{
+	LL_WARN(lv->nestcnt == 0);
+	LL_WARN(lv->owner != current);
+	if (--lv->nestcnt)
+		return;
+
+	lv->owner = NULL;
+	spin_unlock_local(&lv->lock);
+}
+
+#define local_unlock(lvar)					\
+	do {							\
+		__local_unlock(this_cpu_ptr(&lvar));		\
+		put_local_var(lvar);				\
+	} while (0)
+
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+	spin_lock_irqsave(&lv->lock, lv->flags);
+	LL_WARN(lv->owner);
+	LL_WARN(lv->nestcnt);
+	lv->owner = current;
+	lv->nestcnt = 1;
+}
+
+#define local_lock_irq(lvar)						\
+	do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
+#define local_lock_irq_on(lvar, cpu)					\
+	do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
+
+static inline void __local_unlock_irq(struct local_irq_lock *lv)
+{
+	LL_WARN(!lv->nestcnt);
+	LL_WARN(lv->owner != current);
+	lv->owner = NULL;
+	lv->nestcnt = 0;
+	spin_unlock_irq(&lv->lock);
+}
+
+#define local_unlock_irq(lvar)						\
+	do {								\
+		__local_unlock_irq(this_cpu_ptr(&lvar));		\
+		put_local_var(lvar);					\
+	} while (0)
+
+#define local_unlock_irq_on(lvar, cpu)					\
+	do {								\
+		__local_unlock_irq(&per_cpu(lvar, cpu));		\
+	} while (0)
+
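+/*
+ * Returns 1 for a recursive acquisition (owner == current) so that
+ * local_lock_irqsave() can drop the extra migrate_disable() taken by
+ * its nested get_local_var() again via put_local_var().
+ */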
+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+{
+	if (lv->owner != current) {
+		__local_lock_irq(lv);
+		return 0;
+	} else {
+		lv->nestcnt++;
+		return 1;
+	}
+}
+
+#define local_lock_irqsave(lvar, _flags)				\
+	do {								\
+		if (__local_lock_irqsave(&get_local_var(lvar)))		\
+			put_local_var(lvar);				\
+		_flags = __this_cpu_read(lvar.flags);			\
+	} while (0)
+
+#define local_lock_irqsave_on(lvar, _flags, cpu)			\
+	do {								\
+		__local_lock_irqsave(&per_cpu(lvar, cpu));		\
+		_flags = per_cpu(lvar, cpu).flags;			\
+	} while (0)
+
+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+					    unsigned long flags)
+{
+	LL_WARN(!lv->nestcnt);
+	LL_WARN(lv->owner != current);
+	if (--lv->nestcnt)
+		return 0;
+
+	lv->owner = NULL;
+	spin_unlock_irqrestore(&lv->lock, lv->flags);
+	return 1;
+}
+
+#define local_unlock_irqrestore(lvar, flags)				\
+	do {								\
+		if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \
+			put_local_var(lvar);				\
+	} while (0)
+
+#define local_unlock_irqrestore_on(lvar, flags, cpu)			\
+	do {								\
+		__local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);	\
+	} while (0)
+
+#define local_spin_trylock_irq(lvar, lock)				\
+	({								\
+		int __locked;						\
+		local_lock_irq(lvar);					\
+		__locked = spin_trylock(lock);				\
+		if (!__locked)						\
+			local_unlock_irq(lvar);				\
+		__locked;						\
+	})
+
+#define local_spin_lock_irq(lvar, lock)					\
+	do {								\
+		local_lock_irq(lvar);					\
+		spin_lock(lock);					\
+	} while (0)
+
+#define local_spin_unlock_irq(lvar, lock)				\
+	do {								\
+		spin_unlock(lock);					\
+		local_unlock_irq(lvar);					\
+	} while (0)
+
+#define local_spin_lock_irqsave(lvar, lock, flags)			\
+	do {								\
+		local_lock_irqsave(lvar, flags);			\
+		spin_lock(lock);					\
+	} while (0)
+
+#define local_spin_unlock_irqrestore(lvar, lock, flags)			\
+	do {								\
+		spin_unlock(lock);					\
+		local_unlock_irqrestore(lvar, flags);			\
+	} while (0)
+
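+/*
+ * get_locked_var() takes the per-CPU lock and evaluates to this CPU's
+ * instance of @var; put_locked_var() drops the lock again.
+ */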
+#define get_locked_var(lvar, var)					\
+	(*({								\
+		local_lock(lvar);					\
+		this_cpu_ptr(&var);					\
+	}))
+
+#define put_locked_var(lvar, var)	local_unlock(lvar);
+
+#define local_lock_cpu(lvar)						\
+	({								\
+		local_lock(lvar);					\
+		smp_processor_id();					\
+	})
+
+#define local_unlock_cpu(lvar)		local_unlock(lvar)
+
+#else /* PREEMPT_RT_BASE */
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)		extern __typeof__(const int) lvar
+
+static inline void local_irq_lock_init(int lvar) { }
+
+#define local_lock(lvar)			preempt_disable()
+#define local_unlock(lvar)			preempt_enable()
+#define local_lock_irq(lvar)			local_irq_disable()
+#define local_unlock_irq(lvar)			local_irq_enable()
+#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
+
+#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
+#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
+#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
+#define local_spin_lock_irqsave(lvar, lock, flags)	\
+	spin_lock_irqsave(lock, flags)
+#define local_spin_unlock_irqrestore(lvar, lock, flags)	\
+	spin_unlock_irqrestore(lock, flags)
+
+#define get_locked_var(lvar, var)		get_cpu_var(var)
+#define put_locked_var(lvar, var)		put_cpu_var(var)
+
+#define local_lock_cpu(lvar)			get_cpu()
+#define local_unlock_cpu(lvar)			put_cpu()
+
+#endif
+
+#endif
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index caebf2a..53a60a5 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
 	 PERCPU_MODULE_RESERVE)
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+#define get_local_var(var) (*({		\
+	migrate_disable();		\
+	this_cpu_ptr(&var); }))
+
+#define put_local_var(var) do {		\
+	(void)&(var);			\
+	migrate_enable();		\
+} while (0)
+
+# define get_local_ptr(var) ({		\
+	migrate_disable();		\
+	this_cpu_ptr(var); })
+
+# define put_local_ptr(var) do {	\
+	(void)(var);			\
+	migrate_enable();		\
+} while (0)
+
+#else
+
+#define get_local_var(var)	get_cpu_var(var)
+#define put_local_var(var)	put_cpu_var(var)
+#define get_local_ptr(var)	get_cpu_ptr(var)
+#define put_local_ptr(var)	put_cpu_ptr(var)
+
+#endif
+
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)