/*
 * kernel/rt.c
 *
 * Real-Time Preemption Support
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * historic credit for proving that Linux spinlocks can be implemented via
 * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
 * and others) who prototyped it on 2.4 and did lots of comparative
 * research and analysis; TimeSys, for proving that you can implement a
 * fully preemptible kernel via the use of IRQ threading and mutexes;
 * Bill Huey for persuasively arguing on lkml that the mutex model is the
 * right one; and to MontaVista, who ported pmutexes to 2.6.
 *
 * This code is a from-scratch implementation and is not based on pmutexes,
 * but the idea of converting spinlocks to mutexes is used here too.
 *
 * lock debugging, locking tree, deadlock detection:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 *
 * Includes portions of the generic R/W semaphore implementation from:
 *
 *  Copyright (c) 2001   David Howells (dhowells@redhat.com).
 *  - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 *  - Derived also from comments by Linus
 *
 * Pending ownership of locks and ownership stealing:
 *
 *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
 *
 *   (also by Steven Rostedt)
 *    - Converted single pi_lock to individual task locks.
 *
 * By Esben Nielsen:
 *    Doing priority inheritance with help of the scheduler.
 *
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  - major rework based on Esben Nielsen's initial patch
 *  - replaced thread_info references by task_struct refs
 *  - removed task->pending_owner dependency
 *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
 *    in the scheduler return path as discussed with Steven Rostedt
 *
 *  Copyright (C) 2006, Kihon Technologies Inc.
 *    Steven Rostedt <rostedt@goodmis.org>
 *  - debugged and patched Thomas Gleixner's rework.
 *  - added back the cmpxchg to the rework.
 *  - turned atomic require back on for SMP.
 */

#include <linux/spinlock.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/plist.h>
#include <linux/fs.h>
#include <linux/futex.h>
#include <linux/hrtimer.h>

#include "rtmutex_common.h"

/*
 * struct mutex functions
 */
void __mutex_do_init(struct mutex *mutex, const char *name,
                     struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
        lockdep_init_map(&mutex->dep_map, name, key, 0);
#endif
        mutex->lock.save_state = 0;
}
EXPORT_SYMBOL(__mutex_do_init);

void __lockfunc _mutex_lock(struct mutex *lock)
{
        mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock);

int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        ret = rt_mutex_lock_interruptible(&lock->lock);
        if (ret)
                mutex_release(&lock->dep_map, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(_mutex_lock_interruptible);

int __lockfunc _mutex_lock_killable(struct mutex *lock)
{
        int ret;

        mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        ret = rt_mutex_lock_killable(&lock->lock);
        if (ret)
                mutex_release(&lock->dep_map, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(_mutex_lock_killable);
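
/*
 * Usage sketch (illustrative only; "my_mutex" is a hypothetical
 * driver-local mutex, caller is in process context): the interruptible
 * and killable variants can fail, so the return value must be checked
 * before the protected data is touched:
 *
 *	ret = mutex_lock_interruptible(&my_mutex);
 *	if (ret)
 *		return ret;	<- interrupted by a signal, lock NOT held
 *	...
 *	mutex_unlock(&my_mutex);
 */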

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
{
        mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
        rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock_nested);

void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
        rt_mutex_lock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_lock_nest_lock);

int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
{
        int ret;

        mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
        ret = rt_mutex_lock_interruptible(&lock->lock);
        if (ret)
                mutex_release(&lock->dep_map, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(_mutex_lock_interruptible_nested);

int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
{
        int ret;

        mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        ret = rt_mutex_lock_killable(&lock->lock);
        if (ret)
                mutex_release(&lock->dep_map, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(_mutex_lock_killable_nested);
#endif

int __lockfunc _mutex_trylock(struct mutex *lock)
{
        int ret = rt_mutex_trylock(&lock->lock);

        if (ret)
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL(_mutex_trylock);

void __lockfunc _mutex_unlock(struct mutex *lock)
{
        mutex_release(&lock->dep_map, 1, _RET_IP_);
        rt_mutex_unlock(&lock->lock);
}
EXPORT_SYMBOL(_mutex_unlock);
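
/*
 * Usage sketch (illustrative only; "my_mutex" is a hypothetical
 * driver-local lock): on PREEMPT_RT the regular struct mutex API is
 * backed by the rt_mutex based implementations above, so callers keep
 * the ordinary interface and may sleep while holding the lock:
 *
 *	static DEFINE_MUTEX(my_mutex);
 *
 *	mutex_lock(&my_mutex);
 *	...			<- critical section, may sleep
 *	mutex_unlock(&my_mutex);
 *
 *	if (mutex_trylock(&my_mutex)) {
 *		...		<- acquired without blocking
 *		mutex_unlock(&my_mutex);
 *	}
 */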

/*
 * rwlock_t functions
 */
int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
        int ret;

        migrate_disable();
        ret = rt_mutex_trylock(&rwlock->lock);
        if (ret)
                rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
        else
                migrate_enable();

        return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
{
        int ret;

        /*
         * Interrupts are not disabled on RT; hand back dummy flags,
         * which the caller is expected to ignore on release.
         */
        *flags = 0;
        ret = rt_write_trylock(rwlock);
        return ret;
}
EXPORT_SYMBOL(rt_write_trylock_irqsave);

int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
        struct rt_mutex *lock = &rwlock->lock;
        int ret = 1;

        /*
         * recursive read locks succeed when current owns the lock,
         * but not when read_depth == 0 which means that the lock is
         * write locked.
         */
        if (rt_mutex_owner(lock) != current) {
                migrate_disable();
                ret = rt_mutex_trylock(lock);
                if (ret)
                        rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
                else
                        migrate_enable();

        } else if (!rwlock->read_depth) {
                ret = 0;
        }

        if (ret)
                rwlock->read_depth++;

        return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

void __lockfunc rt_write_lock(rwlock_t *rwlock)
{
        rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
        __rt_spin_lock(&rwlock->lock);
}
EXPORT_SYMBOL(rt_write_lock);

void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
        struct rt_mutex *lock = &rwlock->lock;

        /*
         * recursive read locks succeed when current owns the lock
         */
        if (rt_mutex_owner(lock) != current) {
                rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
                __rt_spin_lock(lock);
        }
        rwlock->read_depth++;
}
EXPORT_SYMBOL(rt_read_lock);

void __lockfunc rt_write_unlock(rwlock_t *rwlock)
{
        /* NOTE: we always pass in '1' for nested, for simplicity */
        rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
        __rt_spin_unlock(&rwlock->lock);
        migrate_enable();
}
EXPORT_SYMBOL(rt_write_unlock);

void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
        /* Release the lock only when read_depth is down to 0 */
        if (--rwlock->read_depth == 0) {
                rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
                __rt_spin_unlock(&rwlock->lock);
                migrate_enable();
        }
}
EXPORT_SYMBOL(rt_read_unlock);
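
/*
 * Usage sketch (illustrative only; "my_rwlock" is a hypothetical
 * driver-local lock): on RT rwlock_t is a sleeping lock and the read
 * side is recursive for the owning task via read_depth, so the nesting
 * below is legal and only the outermost read_unlock() drops the
 * underlying rt_mutex:
 *
 *	static DEFINE_RWLOCK(my_rwlock);
 *
 *	read_lock(&my_rwlock);		<- takes rwlock->lock, depth 1
 *	read_lock(&my_rwlock);		<- owner recursion, depth 2
 *	read_unlock(&my_rwlock);	<- depth 1, lock still held
 *	read_unlock(&my_rwlock);	<- depth 0, __rt_spin_unlock()
 */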

unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
{
        /* Interrupts stay enabled on RT; the returned flags are a dummy */
        rt_write_lock(rwlock);

        return 0;
}
EXPORT_SYMBOL(rt_write_lock_irqsave);

unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
{
        /* Interrupts stay enabled on RT; the returned flags are a dummy */
        rt_read_lock(rwlock);

        return 0;
}
EXPORT_SYMBOL(rt_read_lock_irqsave);

void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
        lockdep_init_map(&rwlock->dep_map, name, key, 0);
#endif
        rwlock->lock.save_state = 1;
        rwlock->read_depth = 0;
}
EXPORT_SYMBOL(__rt_rwlock_init);

/*
 * rw_semaphores
 */

void rt_up_write(struct rw_semaphore *rwsem)
{
        rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
        rt_mutex_unlock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_up_write);

void __rt_up_read(struct rw_semaphore *rwsem)
{
        if (--rwsem->read_depth == 0)
                rt_mutex_unlock(&rwsem->lock);
}

void rt_up_read(struct rw_semaphore *rwsem)
{
        rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
        __rt_up_read(rwsem);
}
EXPORT_SYMBOL(rt_up_read);

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void rt_downgrade_write(struct rw_semaphore *rwsem)
{
        BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
        rwsem->read_depth = 1;
}
EXPORT_SYMBOL(rt_downgrade_write);
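
/*
 * Usage sketch (illustrative only; "my_sem" is a hypothetical rwsem
 * declared with DECLARE_RWSEM): a writer can demote itself to a reader
 * without ever releasing the underlying rt_mutex, which on RT amounts
 * to setting read_depth to 1 and carrying on until up_read():
 *
 *	down_write(&my_sem);
 *	...				<- modify the protected data
 *	downgrade_write(&my_sem);	<- still the rt_mutex owner
 *	...				<- read-only access continues
 *	up_read(&my_sem);		<- finally drops the lock
 */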

int rt_down_write_trylock(struct rw_semaphore *rwsem)
{
        int ret = rt_mutex_trylock(&rwsem->lock);

        if (ret)
                rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(rt_down_write_trylock);

void rt_down_write(struct rw_semaphore *rwsem)
{
        rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
        rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write);

int rt_down_write_killable(struct rw_semaphore *rwsem)
{
        int ret;

        rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
        ret = rt_mutex_lock_killable(&rwsem->lock);
        if (ret)
                rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(rt_down_write_killable);

int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
{
        int ret;

        rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
        ret = rt_mutex_lock_killable(&rwsem->lock);
        if (ret)
                rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
        return ret;
}
EXPORT_SYMBOL(rt_down_write_killable_nested);

void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
{
        rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
        rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write_nested);

void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
                               struct lockdep_map *nest)
{
        rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
        rt_mutex_lock(&rwsem->lock);
}
EXPORT_SYMBOL(rt_down_write_nested_lock);

int rt__down_read_trylock(struct rw_semaphore *rwsem)
{
        struct rt_mutex *lock = &rwsem->lock;
        int ret = 1;

        /*
         * recursive read locks succeed when current owns the rwsem,
         * but not when read_depth == 0 which means that the rwsem is
         * write locked.
         */
        if (rt_mutex_owner(lock) != current)
                ret = rt_mutex_trylock(&rwsem->lock);
        else if (!rwsem->read_depth)
                ret = 0;

        if (ret)
                rwsem->read_depth++;
        return ret;
}

int rt_down_read_trylock(struct rw_semaphore *rwsem)
{
        int ret;

        ret = rt__down_read_trylock(rwsem);
        if (ret)
                rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);

        return ret;
}
EXPORT_SYMBOL(rt_down_read_trylock);

void rt__down_read(struct rw_semaphore *rwsem)
{
        struct rt_mutex *lock = &rwsem->lock;

        if (rt_mutex_owner(lock) != current)
                rt_mutex_lock(&rwsem->lock);
        rwsem->read_depth++;
}
EXPORT_SYMBOL(rt__down_read);

static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
        rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
        rt__down_read(rwsem);
}

void rt_down_read(struct rw_semaphore *rwsem)
{
        __rt_down_read(rwsem, 0);
}
EXPORT_SYMBOL(rt_down_read);

void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
{
        __rt_down_read(rwsem, subclass);
}
EXPORT_SYMBOL(rt_down_read_nested);
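
/*
 * Usage sketch (illustrative only; "my_sem" is a hypothetical rwsem
 * declared with DECLARE_RWSEM): as with rwlock_t above, the reader
 * side is recursive for the task that already owns the underlying
 * rt_mutex; read_depth counts the nesting and only the final up_read()
 * releases the lock:
 *
 *	down_read(&my_sem);	<- takes rwsem->lock, depth 1
 *	down_read(&my_sem);	<- owner recursion, depth 2
 *	up_read(&my_sem);	<- depth 1, still owner
 *	up_read(&my_sem);	<- depth 0, rt_mutex_unlock()
 */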

void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
                     struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
        lockdep_init_map(&rwsem->dep_map, name, key, 0);
#endif
        rwsem->read_depth = 0;
        rwsem->lock.save_state = 0;
}
EXPORT_SYMBOL(__rt_rwsem_init);

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if the decrement hits 0, return false
 * otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
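
/*
 * Usage sketch (illustrative only; "obj", its "refcount" and "lock"
 * members and free_object() are hypothetical): the typical caller uses
 * this helper to take the lock only when dropping the last reference:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
 *		...			<- last reference is gone
 *		mutex_unlock(&obj->lock);
 *		free_object(obj);
 *	}
 */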