1 #ifndef _LINUX_RWSEM_RT_H
2 #define _LINUX_RWSEM_RT_H
5 #error "Include rwsem.h"
9 * RW-semaphores are a spinlock plus a reader-depth count.
11 * Note that the semantics are different from the usual
12 * Linux rw-sems, in PREEMPT_RT mode we do not allow
13 * multiple readers to hold the lock at once, we only allow
14 * a read-lock owner to read-lock recursively. This is
15 * better for latency, makes the implementation inherently
16 * fair and makes it simpler as well.
19 #include <linux/rtmutex.h>
24 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep bookkeeping; compiled in only when lock debugging is enabled.
 * NOTE(review): the enclosing struct rw_semaphore definition is elided
 * in this chunk -- this member sits inside it. */
25 struct lockdep_map dep_map;
/*
 * Static initializer: an RT rwsem is just its underlying rtmutex plus
 * (optionally) a lockdep map; there is no reader count to initialize here.
 */
29 #define __RWSEM_INITIALIZER(name) \
30 { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
31 RW_DEP_MAP_INIT(name) }
/* Define and statically initialize a file-scope rw_semaphore. */
33 #define DECLARE_RWSEM(lockname) \
34 struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
/*
 * Out-of-line init helper: registers @name/@key with lockdep for this
 * rwsem (implementation lives outside this header).
 */
36 extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
37 struct lock_class_key *key);
/*
 * Runtime initializer: set up the embedded rtmutex, then hand the
 * name/key to __rt_rwsem_init() for lockdep registration.
 * NOTE(review): the customary do { ... } while (0) wrapper lines are
 * elided in this chunk -- confirm against the full header.
 */
39 #define __rt_init_rwsem(sem, name, key) \
41 rt_mutex_init(&(sem)->lock); \
42 __rt_rwsem_init((sem), (name), (key));\
/* Generic kernel entry point mapped onto the RT implementation. */
45 #define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
/*
 * Convenience initializer: creates a static lock_class_key per call site
 * and uses the stringified lock name for lockdep.
 * NOTE(review): surrounding statement-wrapper lines are elided here.
 */
47 # define rt_init_rwsem(sem) \
49 static struct lock_class_key __key; \
51 __rt_init_rwsem((sem), #sem, &__key); \
/*
 * Out-of-line RT rwsem operations (defined elsewhere in the RT patch).
 * Naming: the rt__* double-underscore variants are the raw forms used by
 * the __down_read()/__down_read_trylock() wrappers below -- presumably
 * without annotation/debug hooks; confirm against the implementation.
 * Trylock functions return int (nonzero on success per the generic
 * rwsem convention -- TODO confirm), killable acquire returns an errno.
 */
54 extern void rt_down_write(struct rw_semaphore *rwsem);
55 extern int rt_down_write_killable(struct rw_semaphore *rwsem);
56 extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
57 extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
/* NOTE(review): continuation line with the subclass parameter is elided. */
58 extern int rt_down_write_killable_nested(struct rw_semaphore *rwsem,
60 extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
61 struct lockdep_map *nest);
62 extern void rt__down_read(struct rw_semaphore *rwsem);
63 extern void rt_down_read(struct rw_semaphore *rwsem);
64 extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
65 extern int rt__down_read_trylock(struct rw_semaphore *rwsem);
66 extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
67 extern void __rt_up_read(struct rw_semaphore *rwsem);
68 extern void rt_up_read(struct rw_semaphore *rwsem);
69 extern void rt_up_write(struct rw_semaphore *rwsem);
70 extern void rt_downgrade_write(struct rw_semaphore *rwsem);
/* Map the generic rwsem API onto the RT versions. */
72 #define init_rwsem(sem) rt_init_rwsem(sem)
/* Locked iff the underlying rtmutex is held (single writer OR single
 * recursive reader in the RT model described at the top of this file). */
73 #define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
/*
 * rwsem_is_contended - nonzero if another task is queued waiting on @sem.
 * Peeks at the rtmutex waiter rbtree directly (open-coded equivalent of
 * rt_mutex_has_waiters(), per the comment below). Result is racy without
 * the wait lock -- a hint only, like the generic implementation.
 */
75 static inline int rwsem_is_contended(struct rw_semaphore *sem)
77 /* rt_mutex_has_waiters() */
78 return !RB_EMPTY_ROOT(&sem->lock.waiters);
/*
 * Reader-side acquire wrappers. Each is a thin forward to the matching
 * rt_* function declared above; the bodies of __down_read()/down_read()
 * are elided in this chunk -- presumably rt__down_read()/rt_down_read(),
 * confirm against the full header.
 */
81 static inline void __down_read(struct rw_semaphore *sem)
86 static inline void down_read(struct rw_semaphore *sem)
/* Trylock variants: return the rt_* result unchanged. */
91 static inline int __down_read_trylock(struct rw_semaphore *sem)
93 return rt__down_read_trylock(sem);
96 static inline int down_read_trylock(struct rw_semaphore *sem)
98 return rt_down_read_trylock(sem);
/*
 * Writer-side acquire wrappers. down_write()'s body is elided in this
 * chunk -- presumably rt_down_write(sem); confirm against the full header.
 * down_write_killable() propagates the rt_* return (0 or -EINTR-style
 * error -- TODO confirm exact error code at the implementation).
 */
101 static inline void down_write(struct rw_semaphore *sem)
106 static inline int down_write_killable(struct rw_semaphore *sem)
108 return rt_down_write_killable(sem);
111 static inline int down_write_trylock(struct rw_semaphore *sem)
113 return rt_down_write_trylock(sem);
/*
 * Release wrappers. Bodies of __up_read()/up_read()/up_write() are
 * elided in this chunk -- presumably __rt_up_read()/rt_up_read()/
 * rt_up_write(); confirm against the full header.
 */
116 static inline void __up_read(struct rw_semaphore *sem)
121 static inline void up_read(struct rw_semaphore *sem)
126 static inline void up_write(struct rw_semaphore *sem)
/* Convert a held write lock into a read lock without dropping it. */
131 static inline void downgrade_write(struct rw_semaphore *sem)
133 rt_downgrade_write(sem);
/*
 * down_read_nested - acquire @sem for reading with a lockdep subclass
 * annotation (for locks of the same class taken in a known order).
 *
 * Fix: the original used `return rt_down_read_nested(...);` -- a
 * `return` with a void expression inside a void function, which is an
 * ISO C constraint violation (accepted only as a GNU extension).
 * Drop the `return`; behavior is unchanged.
 */
static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	rt_down_read_nested(sem, subclass);
}
/* Writer acquire with a lockdep subclass annotation. */
141 static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
143 rt_down_write_nested(sem, subclass);
/*
 * Killable variant; propagates the rt_* return code.
 * NOTE(review): the `int subclass` parameter line and opening brace are
 * elided in this chunk.
 */
146 static inline int down_write_killable_nested(struct rw_semaphore *sem,
149 return rt_down_write_killable_nested(sem, subclass);
/*
 * down_write_nest_lock - write-acquire @sem while already holding
 * @nest_lock, telling lockdep about the nesting relationship.
 * With lockdep enabled the nest lock's dep_map is passed through;
 * otherwise NULL is passed and the annotation is a no-op.
 * NOTE(review): the #else/#endif lines between the two variants are
 * elided in this chunk.
 */
152 #ifdef CONFIG_DEBUG_LOCK_ALLOC
153 static inline void down_write_nest_lock(struct rw_semaphore *sem,
154 struct rw_semaphore *nest_lock)
156 rt_down_write_nested_lock(sem, &nest_lock->dep_map);
161 static inline void down_write_nest_lock(struct rw_semaphore *sem,
162 struct rw_semaphore *nest_lock)
164 rt_down_write_nested_lock(sem, NULL);