1 From c8fb8b6dfe32d17346a70acfcd58a8022acbc869 Mon Sep 17 00:00:00 2001
2 From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
3 Date: Mon, 28 Oct 2013 09:36:37 +0100
4 Subject: [PATCH 126/366] rtmutex: Add RT aware ww locks
5
6 lockdep says:
7 | --------------------------------------------------------------------------
8 | | Wound/wait tests |
9 | ---------------------
10 |                 ww api failures:  ok  |  ok  |  ok  |
11 |              ww contexts mixing:  ok  |  ok  |
12 |            finishing ww context:  ok  |  ok  |  ok  |  ok  |
13 |              locking mismatches:  ok  |  ok  |  ok  |
14 |                EDEADLK handling:  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |  ok  |
15 |          spinlock nest unlocked:  ok  |
16 | -----------------------------------------------------
17 |                                |block | try  |context|
18 | -----------------------------------------------------
19 |                         context:  ok  |  ok  |  ok  |
20 |                             try:  ok  |  ok  |  ok  |
21 |                           block:  ok  |  ok  |  ok  |
22 |                        spinlock:  ok  |  ok  |  ok  |
23
24 Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
25 ---
26  kernel/locking/rtmutex.c | 251 ++++++++++++++++++++++++++++++++++++++++++-----
27  1 file changed, 226 insertions(+), 25 deletions(-)
28
29 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
30 index 8275f47..7b59f63 100644
31 --- a/kernel/locking/rtmutex.c
32 +++ b/kernel/locking/rtmutex.c
33 @@ -21,6 +21,7 @@
34  #include <linux/sched/rt.h>
35  #include <linux/sched/deadline.h>
36  #include <linux/timer.h>
37 +#include <linux/ww_mutex.h>
38  
39  #include "rtmutex_common.h"
40  
41 @@ -1221,6 +1222,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
42  
43  #endif /* PREEMPT_RT_FULL */
44  
45 +#ifdef CONFIG_PREEMPT_RT_FULL
46 +       static inline int __sched
47 +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
48 +{
49 +       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
50 +       struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
51 +
52 +       if (!hold_ctx)
53 +               return 0;
54 +
55 +       if (unlikely(ctx == hold_ctx))
56 +               return -EALREADY;
57 +
58 +       if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
59 +           (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
60 +#ifdef CONFIG_DEBUG_MUTEXES
61 +               DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
62 +               ctx->contending_lock = ww;
63 +#endif
64 +               return -EDEADLK;
65 +       }
66 +
67 +       return 0;
68 +}
69 +#else
70 +       static inline int __sched
71 +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
72 +{
73 +       BUG();
74 +       return 0;
75 +}
76 +
77 +#endif
78 +
79  static inline int
80  try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
81                      struct rt_mutex_waiter *waiter)
82 @@ -1478,7 +1513,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
83  static int __sched
84  __rt_mutex_slowlock(struct rt_mutex *lock, int state,
85                     struct hrtimer_sleeper *timeout,
86 -                   struct rt_mutex_waiter *waiter)
87 +                   struct rt_mutex_waiter *waiter,
88 +                   struct ww_acquire_ctx *ww_ctx)
89  {
90         int ret = 0;
91  
92 @@ -1501,6 +1537,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
93                                 break;
94                 }
95  
96 +               if (ww_ctx && ww_ctx->acquired > 0) {
97 +                       ret = __mutex_lock_check_stamp(lock, ww_ctx);
98 +                       if (ret)
99 +                               break;
100 +               }
101 +
102                 raw_spin_unlock(&lock->wait_lock);
103  
104                 debug_rt_mutex_print_deadlock(waiter);
105 @@ -1535,13 +1577,90 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
106         }
107  }
108  
109 +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
110 +                                                  struct ww_acquire_ctx *ww_ctx)
111 +{
112 +#ifdef CONFIG_DEBUG_MUTEXES
113 +       /*
114 +        * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
115 +        * but released with a normal mutex_unlock in this call.
116 +        *
117 +        * This should never happen, always use ww_mutex_unlock.
118 +        */
119 +       DEBUG_LOCKS_WARN_ON(ww->ctx);
120 +
121 +       /*
122 +        * Not quite done after calling ww_acquire_done() ?
123 +        */
124 +       DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
125 +
126 +       if (ww_ctx->contending_lock) {
127 +               /*
128 +                * After -EDEADLK you tried to
129 +                * acquire a different ww_mutex? Bad!
130 +                */
131 +               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
132 +
133 +               /*
134 +                * You called ww_mutex_lock after receiving -EDEADLK,
135 +                * but 'forgot' to unlock everything else first?
136 +                */
137 +               DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
138 +               ww_ctx->contending_lock = NULL;
139 +       }
140 +
141 +       /*
142 +        * Naughty, using a different class will lead to undefined behavior!
143 +        */
144 +       DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
145 +#endif
146 +       ww_ctx->acquired++;
147 +}
148 +
149 +#ifdef CONFIG_PREEMPT_RT_FULL
150 +static void ww_mutex_account_lock(struct rt_mutex *lock,
151 +                                 struct ww_acquire_ctx *ww_ctx)
152 +{
153 +       struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
154 +       struct rt_mutex_waiter *waiter, *n;
155 +
156 +       /*
157 +        * This branch gets optimized out for the common case,
158 +        * and is only important for ww_mutex_lock.
159 +        */
160 +       ww_mutex_lock_acquired(ww, ww_ctx);
161 +       ww->ctx = ww_ctx;
162 +
163 +       /*
164 +        * Give any possible sleeping processes the chance to wake up,
165 +        * so they can recheck if they have to back off.
166 +        */
167 +       rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
168 +                                            tree_entry) {
169 +               /* XXX debug rt mutex waiter wakeup */
170 +
171 +               BUG_ON(waiter->lock != lock);
172 +               rt_mutex_wake_waiter(waiter);
173 +       }
174 +}
175 +
176 +#else
177 +
178 +static void ww_mutex_account_lock(struct rt_mutex *lock,
179 +                                 struct ww_acquire_ctx *ww_ctx)
180 +{
181 +       BUG();
182 +}
183 +#endif
184 +
185  /*
186   * Slow path lock function:
187   */
188  static int __sched
189  rt_mutex_slowlock(struct rt_mutex *lock, int state,
190                   struct hrtimer_sleeper *timeout,
191 -                 enum rtmutex_chainwalk chwalk)
192 +                 enum rtmutex_chainwalk chwalk,
193 +                 struct ww_acquire_ctx *ww_ctx)
194  {
195         struct rt_mutex_waiter waiter;
196         int ret = 0;
197 @@ -1552,6 +1671,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
198  
199         /* Try to acquire the lock again: */
200         if (try_to_take_rt_mutex(lock, current, NULL)) {
201 +               if (ww_ctx)
202 +                       ww_mutex_account_lock(lock, ww_ctx);
203                 raw_spin_unlock(&lock->wait_lock);
204                 return 0;
205         }
206 @@ -1566,13 +1687,23 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
207  
208         if (likely(!ret))
209                 /* sleep on the mutex */
210 -               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
211 +               ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
212 +                                         ww_ctx);
213 +       else if (ww_ctx) {
214 +               /* ww_mutex received EDEADLK, let it become EALREADY */
215 +               ret = __mutex_lock_check_stamp(lock, ww_ctx);
216 +               BUG_ON(!ret);
217 +       }
218  
219         if (unlikely(ret)) {
220                 __set_current_state(TASK_RUNNING);
221                 if (rt_mutex_has_waiters(lock))
222                         remove_waiter(lock, &waiter);
223 -               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
224 +               /* ww_mutex want to report EDEADLK/EALREADY, let them */
225 +               if (!ww_ctx)
226 +                       rt_mutex_handle_deadlock(ret, chwalk, &waiter);
227 +       } else if (ww_ctx) {
228 +               ww_mutex_account_lock(lock, ww_ctx);
229         }
230  
231         /*
232 @@ -1701,31 +1832,36 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
233   */
234  static inline int
235  rt_mutex_fastlock(struct rt_mutex *lock, int state,
236 +                 struct ww_acquire_ctx *ww_ctx,
237                   int (*slowfn)(struct rt_mutex *lock, int state,
238                                 struct hrtimer_sleeper *timeout,
239 -                               enum rtmutex_chainwalk chwalk))
240 +                               enum rtmutex_chainwalk chwalk,
241 +                               struct ww_acquire_ctx *ww_ctx))
242  {
243         if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
244                 rt_mutex_deadlock_account_lock(lock, current);
245                 return 0;
246         } else
247 -               return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
248 +               return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
249 +                             ww_ctx);
250  }
251  
252  static inline int
253  rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
254                         struct hrtimer_sleeper *timeout,
255                         enum rtmutex_chainwalk chwalk,
256 +                       struct ww_acquire_ctx *ww_ctx,
257                         int (*slowfn)(struct rt_mutex *lock, int state,
258                                       struct hrtimer_sleeper *timeout,
259 -                                     enum rtmutex_chainwalk chwalk))
260 +                                     enum rtmutex_chainwalk chwalk,
261 +                                     struct ww_acquire_ctx *ww_ctx))
262  {
263         if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
264             likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
265                 rt_mutex_deadlock_account_lock(lock, current);
266                 return 0;
267         } else
268 -               return slowfn(lock, state, timeout, chwalk);
269 +               return slowfn(lock, state, timeout, chwalk, ww_ctx);
270  }
271  
272  static inline int
273 @@ -1772,7 +1908,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
274  {
275         might_sleep();
276  
277 -       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
278 +       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
279  }
280  EXPORT_SYMBOL_GPL(rt_mutex_lock);
281  
282 @@ -1789,7 +1925,7 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
283  {
284         might_sleep();
285  
286 -       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
287 +       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
288  }
289  EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
290  
291 @@ -1802,7 +1938,7 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
292         might_sleep();
293  
294         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
295 -                                      RT_MUTEX_FULL_CHAINWALK,
296 +                                      RT_MUTEX_FULL_CHAINWALK, NULL,
297                                        rt_mutex_slowlock);
298  }
299  
300 @@ -1821,7 +1957,7 @@ int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
301  {
302         might_sleep();
303  
304 -       return rt_mutex_fastlock(lock, TASK_KILLABLE, rt_mutex_slowlock);
305 +       return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
306  }
307  EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
308  
309 @@ -1845,6 +1981,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
310  
311         return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
312                                        RT_MUTEX_MIN_CHAINWALK,
313 +                                      NULL,
314                                        rt_mutex_slowlock);
315  }
316  EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
317 @@ -2099,7 +2236,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
318         set_current_state(TASK_INTERRUPTIBLE);
319  
320         /* sleep on the mutex */
321 -       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
322 +       ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
323  
324         if (unlikely(ret))
325                 remove_waiter(lock, waiter);
326 @@ -2115,24 +2252,88 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
327         return ret;
328  }
329  
330 -#ifdef CONFIG_PREEMPT_RT_FULL
331 -struct ww_mutex {
332 -};
333 -struct ww_acquire_ctx {
334 -};
335 -int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
336 +static inline int
337 +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
338  {
339 -       BUG();
340 +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
341 +       unsigned tmp;
342 +
343 +       if (ctx->deadlock_inject_countdown-- == 0) {
344 +               tmp = ctx->deadlock_inject_interval;
345 +               if (tmp > UINT_MAX/4)
346 +                       tmp = UINT_MAX;
347 +               else
348 +                       tmp = tmp*2 + tmp + tmp/2;
349 +
350 +               ctx->deadlock_inject_interval = tmp;
351 +               ctx->deadlock_inject_countdown = tmp;
352 +               ctx->contending_lock = lock;
353 +
354 +               ww_mutex_unlock(lock);
355 +
356 +               return -EDEADLK;
357 +       }
358 +#endif
359 +
360 +       return 0;
361  }
362 -EXPORT_SYMBOL_GPL(__ww_mutex_lock);
363 -int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
364 +
365 +#ifdef CONFIG_PREEMPT_RT_FULL
366 +int __sched
367 +__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
368  {
369 -       BUG();
370 +       int ret;
371 +
372 +       might_sleep();
373 +
374 +       mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
375 +       ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
376 +       if (ret)
377 +               mutex_release(&lock->base.dep_map, 1, _RET_IP_);
378 +       else if (!ret && ww_ctx->acquired > 1)
379 +               return ww_mutex_deadlock_injection(lock, ww_ctx);
380 +
381 +       return ret;
382  }
383  EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
384 +
385 +int __sched
386 +__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
387 +{
388 +       int ret;
389 +
390 +       might_sleep();
391 +
392 +       mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
393 +       ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
394 +       if (ret)
395 +               mutex_release(&lock->base.dep_map, 1, _RET_IP_);
396 +       else if (!ret && ww_ctx->acquired > 1)
397 +               return ww_mutex_deadlock_injection(lock, ww_ctx);
398 +
399 +       return ret;
400 +}
401 +EXPORT_SYMBOL_GPL(__ww_mutex_lock);
402 +
403  void __sched ww_mutex_unlock(struct ww_mutex *lock)
404  {
405 -       BUG();
406 +       int nest = !!lock->ctx;
407 +
408 +       /*
409 +        * The unlocking fastpath is the 0->1 transition from 'locked'
410 +        * into 'unlocked' state:
411 +        */
412 +       if (nest) {
413 +#ifdef CONFIG_DEBUG_MUTEXES
414 +               DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
415 +#endif
416 +               if (lock->ctx->acquired > 0)
417 +                       lock->ctx->acquired--;
418 +               lock->ctx = NULL;
419 +       }
420 +
421 +       mutex_release(&lock->base.dep_map, nest, _RET_IP_);
422 +       rt_mutex_unlock(&lock->base.lock);
423  }
424 -EXPORT_SYMBOL_GPL(ww_mutex_unlock);
425 +EXPORT_SYMBOL(ww_mutex_unlock);
426  #endif
427 -- 
428 1.9.1
429
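
For reference, the wait/wound mutex API that this patch re-implements on top of rt_mutex (under PREEMPT_RT_FULL the ww_mutex_lock calls above end up in rt_mutex_slowlock with a ww_acquire_ctx) is used by callers roughly as in the sketch below. This is a minimal illustration of the generic ww_mutex pattern from the in-tree ww-mutex-design document, not code from this patch; the names demo_ww_class, struct obj and lock_pair() are made up for the example.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

struct obj {
	struct ww_mutex lock;
	/* payload protected by 'lock' */
};

/* Take two objects' locks in either argument order without deadlocking. */
static int lock_pair(struct obj *a, struct obj *b)
{
	struct ww_acquire_ctx ctx;
	struct obj *slow = NULL;	/* lock already taken via the slow path */
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);
retry:
	if (a != slow) {
		ret = ww_mutex_lock(&a->lock, &ctx);
		if (ret)
			goto out_fini;
	}

	ret = ww_mutex_lock(&b->lock, &ctx);
	if (ret == -EDEADLK) {
		/*
		 * We lost the stamp ordering against the current owner of
		 * 'b': drop what we hold, sleep on 'b' without deadlock
		 * risk, then retry with the roles swapped.
		 */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx);
		slow = b;
		swap(a, b);
		goto retry;
	}
	if (ret) {
		ww_mutex_unlock(&a->lock);
		goto out_fini;
	}

	ww_acquire_done(&ctx);	/* optional: no further locks in this context */

	/* ... operate on both objects ... */

	ww_mutex_unlock(&b->lock);
	ww_mutex_unlock(&a->lock);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}

ww_mutex_lock_slow() is used after backing off because the contended lock can then be taken uninterruptibly and cannot return -EDEADLK again for that acquisition, which is what makes the retry loop make forward progress.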