#ifndef _LINUX_RWSEM_RT_H
#define _LINUX_RWSEM_RT_H

#ifndef _LINUX_RWSEM_H
#error "Include rwsem.h"
#endif

/*
 * RW-semaphores are an rt_mutex plus a reader-depth count.
 *
 * Note that the semantics differ from the usual Linux rw-sems: in
 * PREEMPT_RT mode we do not allow multiple readers to hold the lock
 * at once; we only allow a read-lock owner to read-lock recursively.
 * This is better for latency, makes the implementation inherently
 * fair, and makes it simpler as well.
 */
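/*
 * Illustrative usage sketch (not part of the original header); the
 * lock name example_sem is hypothetical. Under PREEMPT_RT the owner
 * may take the read side recursively, but a second task calling
 * down_read() blocks on the underlying rt_mutex until read_depth
 * drops back to zero:
 *
 *      static DECLARE_RWSEM(example_sem);
 *
 *      down_read(&example_sem);        // read_depth == 1
 *      down_read(&example_sem);        // recursive owner read, depth 2
 *      up_read(&example_sem);          // depth back to 1
 *      up_read(&example_sem);          // depth 0, lock released
 */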

#include <linux/rtmutex.h>

struct rw_semaphore {
        struct rt_mutex         lock;
        int                     read_depth;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name) \
        { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
          RW_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(lockname) \
        struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
                            struct lock_class_key *key);

#define __rt_init_rwsem(sem, name, key)                 \
        do {                                            \
                rt_mutex_init(&(sem)->lock);            \
                __rt_rwsem_init((sem), (name), (key));  \
        } while (0)

#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)

#define rt_init_rwsem(sem)                              \
do {                                                    \
        static struct lock_class_key __key;             \
                                                        \
        __rt_init_rwsem((sem), #sem, &__key);           \
} while (0)

extern void rt_down_write(struct rw_semaphore *rwsem);
extern int  rt_down_write_killable(struct rw_semaphore *rwsem);
extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern int  rt_down_write_killable_nested(struct rw_semaphore *rwsem,
                                          int subclass);
extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
                                      struct lockdep_map *nest);
extern void rt__down_read(struct rw_semaphore *rwsem);
extern void rt_down_read(struct rw_semaphore *rwsem);
extern int  rt_down_write_trylock(struct rw_semaphore *rwsem);
extern int  rt__down_read_trylock(struct rw_semaphore *rwsem);
extern int  rt_down_read_trylock(struct rw_semaphore *rwsem);
extern void __rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_write(struct rw_semaphore *rwsem);
extern void rt_downgrade_write(struct rw_semaphore *rwsem);

#define init_rwsem(sem)         rt_init_rwsem(sem)
#define rwsem_is_locked(s)      rt_mutex_is_locked(&(s)->lock)

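/*
 * Illustrative initialization sketch (not part of the original
 * header); my_ctx and my_ctx_setup are hypothetical. For locks that
 * cannot use the static DECLARE_RWSEM() form, init_rwsem() expands to
 * rt_init_rwsem(), which emits one static lock_class_key per call
 * site so lockdep can classify the lock:
 *
 *      struct my_ctx {
 *              struct rw_semaphore sem;
 *      };
 *
 *      static void my_ctx_setup(struct my_ctx *ctx)
 *      {
 *              init_rwsem(&ctx->sem);  // rt_mutex_init + __rt_rwsem_init
 *      }
 */
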
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
        /* Open-coded rt_mutex_has_waiters(): true if any task waits */
        return !RB_EMPTY_ROOT(&sem->lock.waiters);
}

static inline void __down_read(struct rw_semaphore *sem)
{
        rt__down_read(sem);
}

static inline void down_read(struct rw_semaphore *sem)
{
        rt_down_read(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        return rt__down_read_trylock(sem);
}

static inline int down_read_trylock(struct rw_semaphore *sem)
{
        return rt_down_read_trylock(sem);
}

static inline void down_write(struct rw_semaphore *sem)
{
        rt_down_write(sem);
}

static inline int down_write_killable(struct rw_semaphore *sem)
{
        return rt_down_write_killable(sem);
}

static inline int down_write_trylock(struct rw_semaphore *sem)
{
        return rt_down_write_trylock(sem);
}

static inline void __up_read(struct rw_semaphore *sem)
{
        __rt_up_read(sem);
}

static inline void up_read(struct rw_semaphore *sem)
{
        rt_up_read(sem);
}

static inline void up_write(struct rw_semaphore *sem)
{
        rt_up_write(sem);
}

static inline void downgrade_write(struct rw_semaphore *sem)
{
        rt_downgrade_write(sem);
}

static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
        rt_down_read_nested(sem, subclass);
}

static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
        rt_down_write_nested(sem, subclass);
}

static inline int down_write_killable_nested(struct rw_semaphore *sem,
                                             int subclass)
{
        return rt_down_write_killable_nested(sem, subclass);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void down_write_nest_lock(struct rw_semaphore *sem,
                struct rw_semaphore *nest_lock)
{
        rt_down_write_nested_lock(sem, &nest_lock->dep_map);
}

#else

static inline void down_write_nest_lock(struct rw_semaphore *sem,
                struct rw_semaphore *nest_lock)
{
        rt_down_write_nested_lock(sem, NULL);
}
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
#endif /* _LINUX_RWSEM_RT_H */