Apply preempt_rt patch-4.9-rt1.patch.xz
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index c611c47de8849b5ac68a8085d85dcd86e143d907..08a5ab7624950ea2f1cb76d3abfef33122673b85 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
 #endif
 
 struct timer_base {
-       spinlock_t              lock;
+       raw_spinlock_t          lock;
        struct timer_list       *running_timer;
+#ifdef CONFIG_PREEMPT_RT_FULL
+       struct swait_queue_head wait_for_running_timer;
+#endif
        unsigned long           clk;
        unsigned long           next_expiry;
        unsigned int            cpu;
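This first hunk changes the type of the per-base lock and, under CONFIG_PREEMPT_RT_FULL, adds a simple wait queue head that is used by the wait_for_running_timer() helper introduced further down. On an RT kernel a plain spinlock_t becomes a sleeping rtmutex, but base->lock is taken from hard interrupt context (the tick and NOHZ idle paths), so it has to stay a true spinning lock, i.e. raw_spinlock_t. A minimal sketch of that raw-spinlock pattern, with made-up names (example_lock, example_clk), not code from this patch:

	#include <linux/spinlock.h>

	/* A raw spinlock stays a real spinning lock even on PREEMPT_RT_FULL,
	 * so it remains usable from contexts that must not sleep. */
	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */
	static unsigned long example_clk;		/* hypothetical */

	static void touch_from_hardirq(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		example_clk++;		/* only short, non-sleeping work under a raw lock */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}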
@@ -948,10 +951,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
 
                if (!(tf & TIMER_MIGRATING)) {
                        base = get_timer_base(tf);
-                       spin_lock_irqsave(&base->lock, *flags);
+                       raw_spin_lock_irqsave(&base->lock, *flags);
                        if (timer->flags == tf)
                                return base;
-                       spin_unlock_irqrestore(&base->lock, *flags);
+                       raw_spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
@@ -1023,9 +1026,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
                        /* See the comment in lock_timer_base() */
                        timer->flags |= TIMER_MIGRATING;
 
-                       spin_unlock(&base->lock);
+                       raw_spin_unlock(&base->lock);
                        base = new_base;
-                       spin_lock(&base->lock);
+                       raw_spin_lock(&base->lock);
                        WRITE_ONCE(timer->flags,
                                   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
                }
@@ -1050,7 +1053,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
        }
 
 out_unlock:
-       spin_unlock_irqrestore(&base->lock, flags);
+       raw_spin_unlock_irqrestore(&base->lock, flags);
 
        return ret;
 }
@@ -1144,19 +1147,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
        if (base != new_base) {
                timer->flags |= TIMER_MIGRATING;
 
-               spin_unlock(&base->lock);
+               raw_spin_unlock(&base->lock);
                base = new_base;
-               spin_lock(&base->lock);
+               raw_spin_lock(&base->lock);
                WRITE_ONCE(timer->flags,
                           (timer->flags & ~TIMER_BASEMASK) | cpu);
        }
 
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
-       spin_unlock_irqrestore(&base->lock, flags);
+       raw_spin_unlock_irqrestore(&base->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * Wait for a running timer
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
+       struct timer_base *base;
+       u32 tf = timer->flags;
+
+       if (tf & TIMER_MIGRATING)
+               return;
+
+       base = get_timer_base(tf);
+       swait_event(base->wait_for_running_timer,
+                  base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b)       swake_up_all(&(b)->wait_for_running_timer)
+#else
+static inline void wait_for_running_timer(struct timer_list *timer)
+{
+       cpu_relax();
+}
+
+# define wakeup_timer_waiters(b)       do { } while (0)
+#endif
+
 /**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -1180,7 +1210,7 @@ int del_timer(struct timer_list *timer)
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
-               spin_unlock_irqrestore(&base->lock, flags);
+               raw_spin_unlock_irqrestore(&base->lock, flags);
        }
 
        return ret;
@@ -1208,13 +1238,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
                timer_stats_timer_clear_start_info(timer);
                ret = detach_if_pending(timer, base, true);
        }
-       spin_unlock_irqrestore(&base->lock, flags);
+       raw_spin_unlock_irqrestore(&base->lock, flags);
 
        return ret;
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1274,7 +1304,7 @@ int del_timer_sync(struct timer_list *timer)
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
-               cpu_relax();
+               wait_for_running_timer(timer);
        }
 }
 EXPORT_SYMBOL(del_timer_sync);
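With the hunk above, del_timer_sync() still loops, but between attempts it now sleeps in wait_for_running_timer() instead of spinning with cpu_relax(). On PREEMPT_RT_FULL that means del_timer_sync() may block, so it needs a sleepable calling context. A minimal 4.9-style user of this API, with hypothetical names (my_timer, my_timer_fn, shutting_down), might look like:

	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list my_timer;
	static bool shutting_down;

	static void my_timer_fn(unsigned long data)
	{
		/* periodic work; re-arm unless we are tearing down */
		if (!READ_ONCE(shutting_down))
			mod_timer(&my_timer, jiffies + HZ);
	}

	static int __init my_init(void)
	{
		setup_timer(&my_timer, my_timer_fn, 0);
		mod_timer(&my_timer, jiffies + HZ);
		return 0;
	}

	static void __exit my_exit(void)
	{
		WRITE_ONCE(shutting_down, true);
		/*
		 * Wait until a concurrently running my_timer_fn() has finished.
		 * With this patch the wait sleeps on the swait queue under
		 * PREEMPT_RT_FULL, so this must run in sleepable context
		 * (module exit is).
		 */
		del_timer_sync(&my_timer);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");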
@@ -1339,14 +1369,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
                fn = timer->function;
                data = timer->data;
 
-               if (timer->flags & TIMER_IRQSAFE) {
-                       spin_unlock(&base->lock);
+               if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) &&
+                   timer->flags & TIMER_IRQSAFE) {
+                       raw_spin_unlock(&base->lock);
                        call_timer_fn(timer, fn, data);
-                       spin_lock(&base->lock);
+                       base->running_timer = NULL;
+                       raw_spin_lock(&base->lock);
                } else {
-                       spin_unlock_irq(&base->lock);
+                       raw_spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, data);
-                       spin_lock_irq(&base->lock);
+                       base->running_timer = NULL;
+                       raw_spin_lock_irq(&base->lock);
                }
        }
 }
@@ -1515,7 +1548,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
        if (cpu_is_offline(smp_processor_id()))
                return expires;
 
-       spin_lock(&base->lock);
+       raw_spin_lock(&base->lock);
        nextevt = __next_timer_interrupt(base);
        is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
        base->next_expiry = nextevt;
@@ -1543,7 +1576,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
                if ((expires - basem) > TICK_NSEC)
                        base->is_idle = true;
        }
-       spin_unlock(&base->lock);
+       raw_spin_unlock(&base->lock);
 
        return cmp_next_hrtimer_event(basem, expires);
 }
@@ -1608,13 +1641,13 @@ void update_process_times(int user_tick)
 
        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
+       scheduler_tick();
        run_local_timers();
        rcu_check_callbacks(user_tick);
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK)
        if (in_irq())
                irq_work_tick();
 #endif
-       scheduler_tick();
        run_posix_cpu_timers(p);
 }
 
@@ -1630,7 +1663,7 @@ static inline void __run_timers(struct timer_base *base)
        if (!time_after_eq(jiffies, base->clk))
                return;
 
-       spin_lock_irq(&base->lock);
+       raw_spin_lock_irq(&base->lock);
 
        while (time_after_eq(jiffies, base->clk)) {
 
@@ -1640,8 +1673,8 @@ static inline void __run_timers(struct timer_base *base)
                while (levels--)
                        expire_timers(base, heads + levels);
        }
-       base->running_timer = NULL;
-       spin_unlock_irq(&base->lock);
+       raw_spin_unlock_irq(&base->lock);
+       wakeup_timer_waiters(base);
 }
 
 /*
@@ -1651,6 +1684,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
        struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
+       irq_work_tick_soft();
+
        __run_timers(base);
        if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
                __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
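irq_work_tick_soft() belongs to the RT irq_work rework, which pushes most irq_work items out of hard interrupt context; on PREEMPT_RT_FULL they are drained here, from the (now threaded) timer softirq, instead of from the hard tick. The queueing side is unchanged for users. A minimal sketch with invented names (my_work, my_work_fn):

	#include <linux/irq_work.h>

	static struct irq_work my_work;

	static void my_work_fn(struct irq_work *work)
	{
		/* runs once per queueing, outside the enqueueing context;
		 * on PREEMPT_RT_FULL items like this one are expected to run
		 * from softirq context rather than from the hard tick */
	}

	static void my_setup(void)
	{
		init_irq_work(&my_work, my_work_fn);
	}

	static void my_kick(void)
	{
		irq_work_queue(&my_work);	/* callable from NMI and hard-irq context */
	}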
@@ -1836,16 +1871,16 @@ int timers_dead_cpu(unsigned int cpu)
                 * The caller is globally serialized and nobody else
                 * takes two locks at once, deadlock is not possible.
                 */
-               spin_lock_irq(&new_base->lock);
-               spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+               raw_spin_lock_irq(&new_base->lock);
+               raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
                BUG_ON(old_base->running_timer);
 
                for (i = 0; i < WHEEL_SIZE; i++)
                        migrate_timer_list(new_base, old_base->vectors + i);
 
-               spin_unlock(&old_base->lock);
-               spin_unlock_irq(&new_base->lock);
+               raw_spin_unlock(&old_base->lock);
+               raw_spin_unlock_irq(&new_base->lock);
                put_cpu_ptr(&timer_bases);
        }
        return 0;
@@ -1861,8 +1896,11 @@ static void __init init_timer_cpu(int cpu)
        for (i = 0; i < NR_BASES; i++) {
                base = per_cpu_ptr(&timer_bases[i], cpu);
                base->cpu = cpu;
-               spin_lock_init(&base->lock);
+               raw_spin_lock_init(&base->lock);
                base->clk = jiffies;
+#ifdef CONFIG_PREEMPT_RT_FULL
+               init_swait_queue_head(&base->wait_for_running_timer);
+#endif
        }
 }