]> rtime.felk.cvut.cz Git - hercules2020/nv-tegra/linux-4.4.git/blob - rt-patches/0081-hrtimers-Prepare-full-preemption.patch
rt_patches: required rebase due to printk change
[hercules2020/nv-tegra/linux-4.4.git] / rt-patches / 0081-hrtimers-Prepare-full-preemption.patch
1 From f359d7d790dcfe3603788704a88b77f4f4b8d2f7 Mon Sep 17 00:00:00 2001
2 From: Ingo Molnar <mingo@elte.hu>
3 Date: Fri, 3 Jul 2009 08:29:34 -0500
4 Subject: [PATCH 081/366] hrtimers: Prepare full preemption
5
6 Make cancellation of a running callback in softirq context safe
7 against preemption.
8
9 Signed-off-by: Ingo Molnar <mingo@elte.hu>
10 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
11 ---
12  include/linux/hrtimer.h    | 12 +++++++++++-
13  kernel/time/hrtimer.c      | 33 ++++++++++++++++++++++++++++++++-
14  kernel/time/itimer.c       |  1 +
15  kernel/time/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
16  4 files changed, 77 insertions(+), 2 deletions(-)
17
18 diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
19 index 4a2e499..cbbbff1 100644
20 --- a/include/linux/hrtimer.h
21 +++ b/include/linux/hrtimer.h
22 @@ -206,6 +206,9 @@ struct hrtimer_cpu_base {
23         unsigned int                    nr_hangs;
24         unsigned int                    max_hang_time;
25  #endif
26 +#ifdef CONFIG_PREEMPT_RT_BASE
27 +       wait_queue_head_t               wait;
28 +#endif
29         struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
30  } ____cacheline_aligned;
31  
32 @@ -415,6 +418,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
33         hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
34  }
35  
36 +/* Softirq preemption could deadlock timer removal */
37 +#ifdef CONFIG_PREEMPT_RT_BASE
38 +  extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
39 +#else
40 +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
41 +#endif
42 +
43  /* Query timers: */
44  extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
45  
46 @@ -439,7 +449,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
47   * Helper function to check, whether the timer is running the callback
48   * function
49   */
50 -static inline int hrtimer_callback_running(struct hrtimer *timer)
51 +static inline int hrtimer_callback_running(const struct hrtimer *timer)
52  {
53         return timer->base->cpu_base->running == timer;
54  }
55 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
56 index f55424b..e1524f0 100644
57 --- a/kernel/time/hrtimer.c
58 +++ b/kernel/time/hrtimer.c
59 @@ -871,6 +871,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
60  }
61  EXPORT_SYMBOL_GPL(hrtimer_forward);
62  
63 +#ifdef CONFIG_PREEMPT_RT_BASE
64 +# define wake_up_timer_waiters(b)      wake_up(&(b)->wait)
65 +
66 +/**
67 + * hrtimer_wait_for_timer - Wait for a running timer
68 + *
69 + * @timer:     timer to wait for
70 + *
71 + * The function waits in case the timer's callback function is
72 + * currently executing, using the waitqueue of the timer base. The
73 + * waitqueue is woken up after the timer callback function has
74 + * finished execution.
75 + */
76 +void hrtimer_wait_for_timer(const struct hrtimer *timer)
77 +{
78 +       struct hrtimer_clock_base *base = timer->base;
79 +
80 +       if (base && base->cpu_base && !hrtimer_hres_active())
81 +               wait_event(base->cpu_base->wait,
82 +                               !(hrtimer_callback_running(timer)));
83 +}
84 +
85 +#else
86 +# define wake_up_timer_waiters(b)      do { } while (0)
87 +#endif
88 +
89  /*
90   * enqueue_hrtimer - internal function to (re)start a timer
91   *
92 @@ -1088,7 +1114,7 @@ int hrtimer_cancel(struct hrtimer *timer)
93  
94                 if (ret >= 0)
95                         return ret;
96 -               cpu_relax();
97 +               hrtimer_wait_for_timer(timer);
98         }
99  }
100  EXPORT_SYMBOL_GPL(hrtimer_cancel);
101 @@ -1479,6 +1505,8 @@ void hrtimer_run_queues(void)
102         now = hrtimer_update_base(cpu_base);
103         __hrtimer_run_queues(cpu_base, now);
104         raw_spin_unlock(&cpu_base->lock);
105 +
106 +       wake_up_timer_waiters(cpu_base);
107  }
108  
109  /*
110 @@ -1638,6 +1666,9 @@ static void init_hrtimers_cpu(int cpu)
111  
112         cpu_base->cpu = cpu;
113         hrtimer_init_hres(cpu_base);
114 +#ifdef CONFIG_PREEMPT_RT_BASE
115 +       init_waitqueue_head(&cpu_base->wait);
116 +#endif
117  }
118  
119  #ifdef CONFIG_HOTPLUG_CPU
120 diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
121 index 1d5c720..184de67 100644
122 --- a/kernel/time/itimer.c
123 +++ b/kernel/time/itimer.c
124 @@ -213,6 +213,7 @@ again:
125                 /* We are sharing ->siglock with it_real_fn() */
126                 if (hrtimer_try_to_cancel(timer) < 0) {
127                         spin_unlock_irq(&tsk->sighand->siglock);
128 +                       hrtimer_wait_for_timer(&tsk->signal->real_timer);
129                         goto again;
130                 }
131                 expires = timeval_to_ktime(value->it_value);
132 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
133 index 81aec84..464a981 100644
134 --- a/kernel/time/posix-timers.c
135 +++ b/kernel/time/posix-timers.c
136 @@ -828,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
137         return overrun;
138  }
139  
140 +/*
141 + * Protected by RCU!
142 + */
143 +static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
144 +{
145 +#ifdef CONFIG_PREEMPT_RT_FULL
146 +       if (kc->timer_set == common_timer_set)
147 +               hrtimer_wait_for_timer(&timr->it.real.timer);
148 +       else
149 +               /* FIXME: Whacky hack for posix-cpu-timers */
150 +               schedule_timeout(1);
151 +#endif
152 +}
153 +
154  /* Set a POSIX.1b interval timer. */
155  /* timr->it_lock is taken. */
156  static int
157 @@ -905,6 +919,7 @@ retry:
158         if (!timr)
159                 return -EINVAL;
160  
161 +       rcu_read_lock();
162         kc = clockid_to_kclock(timr->it_clock);
163         if (WARN_ON_ONCE(!kc || !kc->timer_set))
164                 error = -EINVAL;
165 @@ -913,9 +928,12 @@ retry:
166  
167         unlock_timer(timr, flag);
168         if (error == TIMER_RETRY) {
169 +               timer_wait_for_callback(kc, timr);
170                 rtn = NULL;     // We already got the old time...
171 +               rcu_read_unlock();
172                 goto retry;
173         }
174 +       rcu_read_unlock();
175  
176         if (old_setting && !error &&
177             copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
178 @@ -953,10 +971,15 @@ retry_delete:
179         if (!timer)
180                 return -EINVAL;
181  
182 +       rcu_read_lock();
183         if (timer_delete_hook(timer) == TIMER_RETRY) {
184                 unlock_timer(timer, flags);
185 +               timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
186 +                                       timer);
187 +               rcu_read_unlock();
188                 goto retry_delete;
189         }
190 +       rcu_read_unlock();
191  
192         spin_lock(&current->sighand->siglock);
193         list_del(&timer->list);
194 @@ -982,8 +1005,18 @@ static void itimer_delete(struct k_itimer *timer)
195  retry_delete:
196         spin_lock_irqsave(&timer->it_lock, flags);
197  
198 +       /* On RT we can race with a deletion */
199 +       if (!timer->it_signal) {
200 +               unlock_timer(timer, flags);
201 +               return;
202 +       }
203 +
204         if (timer_delete_hook(timer) == TIMER_RETRY) {
205 +               rcu_read_lock();
206                 unlock_timer(timer, flags);
207 +               timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
208 +                                       timer);
209 +               rcu_read_unlock();
210                 goto retry_delete;
211         }
212         list_del(&timer->list);
213 -- 
214 1.9.1
215