From be08fdc7193bb9321b8029b736818426ea61d99a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 8 Feb 2016 16:15:28 +0100
Subject: [PATCH 291/366] rt/locking: Reenable migration accross schedule

We currently disable migration across lock acquisition. That includes the part
where we block on the lock and schedule out. We cannot disable migration after
taking the lock as that would cause a possible lock inversion.

But we can be smart and enable migration when we block and schedule out. That
allows the scheduler to place the task freely at least if this is the first
migrate disable level. For nested locking this does not help at all.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/locking/rtmutex.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)

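
Note: the sketch below is not part of the patch and not the kernel
implementation. It is a stand-alone, user-space illustration (with simplified
stand-ins for migrate_disable()/migrate_enable()) of the nesting count the
changelog refers to: the task becomes migratable again only when the count
drops back to zero, which is why re-enabling migration around schedule()
helps only at the first migrate-disable level and not for nested locking.

#include <stdio.h>

/* Simplified, illustrative stand-ins -- the real primitives operate on a
 * per-task counter that the scheduler consults when placing the task. */
static int migrate_disable_count;

static void migrate_disable(void)
{
	migrate_disable_count++;	/* task is pinned to its current CPU */
}

static void migrate_enable(void)
{
	if (--migrate_disable_count == 0)
		printf("count 0: scheduler may migrate the task again\n");
	else
		printf("count %d: task stays pinned\n", migrate_disable_count);
}

int main(void)
{
	migrate_disable();	/* outermost lock acquisition */
	migrate_enable();	/* blocking at level 1: really unpins the task */
	migrate_disable();	/* re-pin after wakeup */

	migrate_disable();	/* nested lock acquisition */
	migrate_enable();	/* blocking at level 2: outer level keeps it pinned */
	migrate_disable();	/* re-pin after wakeup */

	migrate_enable();	/* nested unlock */
	migrate_enable();	/* outer unlock: task is migratable again */
	return 0;
}
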
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 913aa40..6697100 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -924,14 +924,19 @@ takeit:
  * preemptible spin_lock functions:
  */
 static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
-                                        void  (*slowfn)(struct rt_mutex *lock))
+                                        void  (*slowfn)(struct rt_mutex *lock,
+                                                        bool mg_off),
+                                        bool do_mig_dis)
 {
        might_sleep_no_state_check();
 
+       if (do_mig_dis)
+               migrate_disable();
+
        if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
                rt_mutex_deadlock_account_lock(lock, current);
        else
-               slowfn(lock);
+               slowfn(lock, do_mig_dis);
 }
 
 static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
@@ -989,7 +994,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  * We store the current state under p->pi_lock in p->saved_state and
  * the try_to_wake_up() code handles this accordingly.
  */
-static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
+                                                   bool mg_off)
 {
        struct task_struct *lock_owner, *self = current;
        struct rt_mutex_waiter waiter, *top_waiter;
@@ -1033,8 +1039,13 @@ static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
 
                debug_rt_mutex_print_deadlock(&waiter);
 
-               if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+               if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
+                       if (mg_off)
+                               migrate_enable();
                        schedule();
+                       if (mg_off)
+                               migrate_disable();
+               }
 
                raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
@@ -1105,38 +1116,35 @@ static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
 
 void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
 {
-       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(rt_spin_lock__no_mg);
 
 void __lockfunc rt_spin_lock(spinlock_t *lock)
 {
-       migrate_disable();
-       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(rt_spin_lock);
 
 void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
 {
-       migrate_disable();
-       rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+       rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
 }
 EXPORT_SYMBOL(__rt_spin_lock);
 
 void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
 {
-       rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+       rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
 }
 EXPORT_SYMBOL(__rt_spin_lock__no_mg);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
 {
-       migrate_disable();
-       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+       rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
 }
 EXPORT_SYMBOL(rt_spin_lock_nested);
 #endif
-- 
1.9.1
