From 0c96f27da32acd7651c592add3b0c52cf4217b13 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:29:34 -0500
Subject: [PATCH 077/366] timers: Prepare for full preemption

When softirqs can be preempted we need to make sure that cancelling
the timer from the active thread can not deadlock vs. a running timer
callback. Add a waitqueue to resolve that.
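
For illustration only, not part of the original patch: a minimal userspace
sketch of the same wait-for-running-callback pattern, assuming POSIX threads.
The cancelling side sleeps on a condition variable until the callback clears
a running flag, which mirrors del_timer_sync() sleeping on
base->wait_for_running_timer until __run_timers() wakes the waiters. The
names cancel_timer_sync() and timer_callback() are hypothetical.

/* Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  callback_done = PTHREAD_COND_INITIALIZER;
static bool callback_running;

static void *timer_callback(void *arg)
{
	struct timespec work = { 0, 10 * 1000 * 1000 };	/* 10 ms of "work" */

	(void)arg;
	pthread_mutex_lock(&lock);
	callback_running = true;
	pthread_mutex_unlock(&lock);

	nanosleep(&work, NULL);

	pthread_mutex_lock(&lock);
	callback_running = false;
	pthread_cond_broadcast(&callback_done);	/* like wakeup_timer_waiters() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Hypothetical stand-in for del_timer_sync(): wait, do not spin. */
static void cancel_timer_sync(void)
{
	pthread_mutex_lock(&lock);
	while (callback_running)	/* like wait_for_running_timer() */
		pthread_cond_wait(&callback_done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	struct timespec start = { 0, 1 * 1000 * 1000 };	/* let the callback begin */

	pthread_create(&t, NULL, timer_callback, NULL);
	nanosleep(&start, NULL);
	cancel_timer_sync();	/* returns only after the callback has finished */
	pthread_join(t, NULL);
	printf("cancelled after the callback completed\n");
	return 0;
}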

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/timer.h |  2 +-
 kernel/sched/core.c   |  9 +++++++--
 kernel/time/timer.c   | 39 +++++++++++++++++++++++++++++++++++++--
 3 files changed, 45 insertions(+), 5 deletions(-)

diff --git a/include/linux/timer.h b/include/linux/timer.h
index 61aa61d..299d2b7 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -225,7 +225,7 @@ extern void add_timer(struct timer_list *timer);
 
 extern int try_to_del_timer_sync(struct timer_list *timer);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
   extern int del_timer_sync(struct timer_list *timer);
 #else
 # define del_timer_sync(t)             del_timer(t)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e7fe1ce..ddedd34 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -619,11 +619,14 @@ void resched_cpu(int cpu)
  */
 int get_nohz_timer_target(void)
 {
-       int i, cpu = smp_processor_id();
+       int i, cpu;
        struct sched_domain *sd;
 
+       preempt_disable_rt();
+       cpu = smp_processor_id();
+
       if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
-               return cpu;
+               goto preempt_en_rt;
 
        rcu_read_lock();
        for_each_domain(cpu, sd) {
@@ -642,6 +645,8 @@ int get_nohz_timer_target(void)
                cpu = housekeeping_any_cpu();
 unlock:
        rcu_read_unlock();
+preempt_en_rt:
+       preempt_enable_rt();
        return cpu;
 }
 /*
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index bbc5d11..5fa215e 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -80,6 +80,9 @@ struct tvec_root {
 struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+       wait_queue_head_t wait_for_running_timer;
+#endif
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
@@ -1006,6 +1009,33 @@ void add_timer_on(struct timer_list *timer, int cpu)
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait for a running timer
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+       struct tvec_base *base;
+       u32 tf = timer->flags;
+
+       if (tf & TIMER_MIGRATING)
+               return;
+
+       base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+       wait_event(base->wait_for_running_timer,
+                  base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b)       wake_up(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+       cpu_relax();
+}
+
+# define wakeup_timer_waiters(b)       do { } while (0)
+#endif
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -1123,7 +1153,7 @@ int del_timer_sync(struct timer_list *timer)
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
-               cpu_relax();
+               wait_for_running_timer(timer);
        }
 }
 EXPORT_SYMBOL(del_timer_sync);
@@ -1248,15 +1278,17 @@ static inline void __run_timers(struct tvec_base *base)
                        if (irqsafe) {
                                spin_unlock(&base->lock);
                                call_timer_fn(timer, fn, data);
+                               base->running_timer = NULL;
                                spin_lock(&base->lock);
                        } else {
                                spin_unlock_irq(&base->lock);
                                call_timer_fn(timer, fn, data);
+                               base->running_timer = NULL;
                                spin_lock_irq(&base->lock);
                        }
                }
        }
-       base->running_timer = NULL;
+       wakeup_timer_waiters(base);
        spin_unlock_irq(&base->lock);
 }
 
@@ -1645,6 +1677,9 @@ static void __init init_timer_cpu(int cpu)
 
        base->cpu = cpu;
        spin_lock_init(&base->lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+       init_waitqueue_head(&base->wait_for_running_timer);
+#endif
 
        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
-- 
1.9.1
