/*
 * Kernel internal timers
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2002-05-31 Moved sys_sysinfo here and made its locking sane, Robert Love
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
#ifdef CONFIG_PREEMPT_RT_FULL
        wait_queue_head_t wait_for_running_timer;
#endif
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
        unsigned long all_timers;
        int cpu;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

        timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}
static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        /*
         * Make sure j is still in the future. Otherwise return the
         * unmodified value.
         */
        return time_is_after_jiffies(j) ? j : original;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
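
/*
 * Illustrative sketch (not part of the original file): a periodic
 * housekeeping timer whose exact firing time does not matter can have its
 * expiry rounded so that it coalesces with other whole-second timers and
 * the CPU wakes up less often. The names "my_timer" and "my_func" are
 * hypothetical.
 */
#if 0 /* example only */
static struct timer_list my_timer;

static void my_func(unsigned long data)
{
        /* ... do periodic housekeeping ... */

        /* Re-arm ~5s out, snapped to the shared whole-second boundary. */
        mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
}
#endif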
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
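
/*
 * Illustrative sketch (not part of the original file): a timeout that can
 * tolerate up to a second of extra latency can declare that slack, which
 * lets apply_slack() batch it with neighbouring expiries. "my_timer" and
 * "my_func" are hypothetical.
 */
#if 0 /* example only */
        setup_timer(&my_timer, my_func, 0);
        set_timer_slack(&my_timer, HZ);         /* allow up to 1s of rounding */
        mod_timer(&my_timer, jiffies + 10 * HZ);
#endif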
/*
 * If the list is empty, catch up ->timer_jiffies to the current time.
 * The caller must hold the tvec_base lock.  Returns true if the list
 * was empty and therefore ->timer_jiffies was updated.
 */
static bool catchup_timer_jiffies(struct tvec_base *base)
{
        if (!base->all_timers) {
                base->timer_jiffies = jiffies;
                return true;
        }
        return false;
}

static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than MAX_TVAL (on 64-bit
                 * architectures or with CONFIG_BASE_SMALL=1) then we
                 * use the maximum timeout.
                 */
                if (idx > MAX_TVAL) {
                        idx = MAX_TVAL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
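
/*
 * Worked example (not part of the original file), assuming the !BASE_SMALL
 * layout (TVR_BITS = 8, TVN_BITS = 6): a timer due idx = 100 ticks out goes
 * straight into tv1 at slot (expires & 255); one due idx = 1000 ticks out is
 * past TVR_SIZE (256) but below 1 << 14 (16384), so it lands in tv2 at slot
 * ((expires >> 8) & 63). A standalone mirror of the level-selection rule:
 */
#if 0 /* example only */
/* Returns which wheel level (1..5) an offset of "idx" ticks selects. */
static int tv_level(unsigned long idx)
{
        if (idx < TVR_SIZE)                             /* < 256 */
                return 1;
        if (idx < 1UL << (TVR_BITS + TVN_BITS))         /* < 16384 */
                return 2;
        if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS))     /* < 2^20 */
                return 3;
        if (idx < 1UL << (TVR_BITS + 3 * TVN_BITS))     /* < 2^26 */
                return 4;
        return 5;                                       /* incl. MAX_TVAL clamp */
}
#endif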
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        (void)catchup_timer_jiffies(base);
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
        if (!tbase_get_deferrable(timer->base)) {
                if (!base->active_timers++ ||
                    time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
        }
        base->all_timers++;

        /*
         * Check whether the other CPU is in dynticks mode and needs
         * to be triggered to reevaluate the timer wheel.
         * We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to stop its tick can not
         * evaluate the timer wheel.
         *
         * Spare the IPI for deferrable timers on idle targets though.
         * The next busy ticks will take care of it. Except full dynticks
         * require special care against races with idle_cpu(), lets deal
         * with that later.
         */
        if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
                wake_up_nohz_cpu(base->cpu);
}
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (likely(!timer->start_site))
                return;
        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (timer->entry.prev == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
                         * is tracked in the object tracker.
                         */
                        debug_object_init(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name                   = "timer_list",
        .debug_hint             = timer_debug_hint,
        .fixup_init             = timer_fixup_init,
        .fixup_activate         = timer_fixup_activate,
        .fixup_free             = timer_fixup_free,
        .fixup_assert_init      = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
                             const char *name, struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
{
        struct tvec_base *base = raw_cpu_read(tvec_bases);

        timer->entry.next = NULL;
        timer->base = (void *)((unsigned long)base | flags);
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
                    const char *name, struct lock_class_key *key)
{
        debug_init(timer);
        do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
        detach_timer(timer, true);
        if (!tbase_get_deferrable(timer->base))
                base->active_timers--;
        base->all_timers--;
        (void)catchup_timer_jiffies(base);
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                             bool clear_pending)
{
        if (!timer_pending(timer))
                return 0;

        detach_timer(timer, clear_pending);
        if (!tbase_get_deferrable(timer->base)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
        base->all_timers--;
        (void)catchup_timer_jiffies(base);
        return 1;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

#ifndef CONFIG_PREEMPT_RT_FULL
static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
                                                  struct tvec_base *old,
                                                  struct tvec_base *new)
{
        /* See the comment in lock_timer_base() */
        timer_set_base(timer, NULL);
        spin_unlock(&old->lock);
        spin_lock(&new->lock);
        timer_set_base(timer, new);
        return new;
}
#else
static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
                                                  struct tvec_base *old,
                                                  struct tvec_base *new)
{
        /*
         * We cannot do the above because we might be preempted and
         * then the preempter would see NULL and loop forever.
         */
        if (spin_trylock(&new->lock)) {
                timer_set_base(timer, new);
                spin_unlock(&old->lock);
                return new;
        }
        return old;
}
#endif
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
            bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0;
        int cpu;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        ret = detach_if_pending(timer, base, false);
        if (!ret && pending_only)
                goto out_unlock;

        debug_activate(timer, expires);

        cpu = get_nohz_timer_target(pinned);
        new_base = per_cpu(tvec_bases, cpu);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler yet has not finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer))
                        base = switch_timer_base(timer, base, new_base);
        }

        timer->expires = expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
                long delta = expires - jiffies;

                if (delta < 256)
                        return expires;

                expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = find_last_bit(&mask, BITS_PER_LONG);

        mask = (1UL << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}
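
/*
 * Worked example (not part of the original file): with HZ = 1000,
 * jiffies = 0 and expires = 10000, a negative ->slack gives
 * delta / 256 = 39, so expires_limit = 10039. Then
 * 10000 ^ 10039 = 0x27, whose highest set bit is bit 5, so the mask is
 * 0x1f and the final expiry is 10039 & ~0x1f = 10016, a multiple of 32.
 * Nearby timers round to the same 32-tick boundaries and thus tend to
 * batch onto shared wheel slots.
 */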
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        expires = apply_slack(timer, expires);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
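
/*
 * Illustrative sketch (not part of the original file): the canonical
 * "kick a watchdog" pattern - each call pushes the timeout further into
 * the future, arming the timer if it is not already pending, and is safe
 * against unserialized concurrent callers as described above. The type
 * and field names are hypothetical.
 */
#if 0 /* example only */
static void my_kick_watchdog(struct my_dev *dev)
{
        /* Push the deadline ~2s out; arms the timer if it was idle. */
        mod_timer(&dev->watchdog_timer, jiffies + 2 * HZ);
}
#endif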
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
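
/*
 * Illustrative sketch (not part of the original file): one-shot arming of a
 * freshly initialized timer, with ->expires set before the add as required
 * above. "my_timer", "my_timeout_fn" and "dev" are hypothetical.
 */
#if 0 /* example only */
        setup_timer(&my_timer, my_timeout_fn, (unsigned long)dev);
        my_timer.expires = jiffies + HZ / 2;    /* fire in ~500ms */
        add_timer(&my_timer);                   /* BUGs if already pending */
#endif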
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
#ifdef CONFIG_PREEMPT_RT_FULL
/*
 * Wait for a running timer
 */
static void wait_for_running_timer(struct timer_list *timer)
{
        struct tvec_base *base = timer->base;

        if (base->running_timer == timer)
                wait_event(base->wait_for_running_timer,
                           base->running_timer != timer);
}

# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
#else
static inline void wait_for_running_timer(struct timer_list *timer)
{
        cpu_relax();
}

# define wakeup_timer_waiters(b) do { } while (0)
#endif
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        debug_assert_init(timer);

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                ret = detach_if_pending(timer, base, true);
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        debug_assert_init(timer);

        base = lock_timer_base(timer, &flags);

        if (base->running_timer != timer) {
                timer_stats_timer_clear_start_info(timer);
                ret = detach_if_pending(timer, base, true);
        }
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        /*
         * If lockdep gives a backtrace here, please reference
         * the synchronization rules above.
         */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif
        /*
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
        WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                wait_for_running_timer(timer);
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif
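
/*
 * Illustrative sketch (not part of the original file): a typical teardown
 * order - first stop whatever re-arms the timer, then synchronously kill
 * it, so the handler cannot be running when the backing data is freed.
 * The type and field names are hypothetical.
 */
#if 0 /* example only */
static void my_dev_shutdown(struct my_dev *dev)
{
        dev->shutting_down = true;      /* the handler checks this and won't re-arm */
        del_timer_sync(&dev->poll_timer);
        kfree(dev);                     /* handler is guaranteed finished here */
}
#endif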
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                /* No accounting, while moving them */
                __internal_add_timer(base, timer);
        }

        return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it, this we need to take into
         * account for lockdep too. To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map;

        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count_set(count);
        }
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
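
/*
 * Worked example (not part of the original file), again for the
 * !BASE_SMALL layout (TVR_BITS = 8, TVN_BITS = 6): INDEX(0) is the tv2
 * slot selected by bits 8..13 of ->timer_jiffies, INDEX(1) the tv3 slot
 * from bits 14..19, and so on. tv2 is cascaded only when the low 8 bits
 * wrap to 0, i.e. once every 256 ticks; tv3 once every 16384 ticks, etc.
 */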
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        if (catchup_timer_jiffies(base)) {
                spin_unlock_irq(&base->lock);
                return;
        }
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, head);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
                        irqsafe = tbase_get_irqsafe(timer->base);

                        timer_stats_account_timer(timer);

                        base->running_timer = timer;
                        detach_expired_timer(timer, base);

                        if (irqsafe) {
                                spin_unlock(&base->lock);
                                call_timer_fn(timer, fn, data);
                                base->running_timer = NULL;
                                spin_lock(&base->lock);
                        } else {
                                spin_unlock_irq(&base->lock);
                                call_timer_fn(timer, fn, data);
                                base->running_timer = NULL;
                                spin_lock_irq(&base->lock);
                        }
                }
        }
        wakeup_timer_waiters(base);
        spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ_COMMON
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                if (tbase_get_deferrable(nte->base))
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors in to account and make sure, that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);
        unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return expires;

#ifdef CONFIG_PREEMPT_RT_FULL
        /*
         * On PREEMPT_RT we cannot sleep here. If the trylock does not
         * succeed then we return the worst-case 'expires in 1 tick'
         * value. We use the rt functions here directly to avoid a
         * migrate_disable() call.
         */
        if (!spin_do_trylock(&base->lock))
                return now + 1;
#else
        spin_lock(&base->lock);
#endif
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
                expires = base->next_timer;
        }
#ifdef CONFIG_PREEMPT_RT_FULL
        rt_spin_unlock_after_trylock_in_irq(&base->lock);
#else
        spin_unlock(&base->lock);
#endif

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        scheduler_tick();
        rcu_check_callbacks(user_tick);
#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
        if (in_irq())
                irq_work_tick();
#endif
        run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);

        hrtimer_run_pending();

#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
        irq_work_tick();
#endif

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

 out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
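
/*
 * Illustrative sketch (not part of the original file): waiting up to one
 * second for a condition, interruptibly, by setting the task state before
 * each call as required above. "my_condition" is hypothetical; real code
 * usually prefers the wait_event_timeout() family.
 */
#if 0 /* example only */
        signed long left = HZ;

        while (!my_condition && left) {
                set_current_state(TASK_INTERRUPTIBLE);
                left = schedule_timeout(left);  /* returns remaining jiffies */
                if (signal_pending(current))
                        break;
        }
        __set_current_state(TASK_RUNNING);
#endif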
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
        __set_current_state(TASK_KILLABLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
static int init_timers_cpu(int cpu)
{
        int j;
        struct tvec_base *base;
        static char tvec_base_done[NR_CPUS];

        if (!tvec_base_done[cpu]) {
                static char boot_done;

                if (boot_done) {
                        /*
                         * The APs use this path later in boot
                         */
                        base = kzalloc_node(sizeof(*base), GFP_KERNEL,
                                            cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;

                        /* Make sure tvec_base has TIMER_FLAG_MASK bits free */
                        if (WARN_ON(base != tbase_get_base(base))) {
                                kfree(base);
                                return -ENOMEM;
                        }
                        per_cpu(tvec_bases, cpu) = base;
                } else {
                        /*
                         * This is for the boot CPU - we use compile-time
                         * static initialisation because per-cpu memory isn't
                         * ready yet and because the memory allocators are not
                         * initialised either.
                         */
                        boot_done = 1;
                        base = &boot_tvec_bases;
                }
                spin_lock_init(&base->lock);
                tvec_base_done[cpu] = 1;
                base->cpu = cpu;
        } else {
                base = per_cpu(tvec_bases, cpu);
        }

#ifdef CONFIG_PREEMPT_RT_FULL
        init_waitqueue_head(&base->wait_for_running_timer);
#endif

        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
        base->active_timers = 0;
        base->all_timers = 0;
        return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_first_entry(head, struct timer_list, entry);
                /* We ignore the accounting on the dying cpu */
                detach_timer(timer, false);
                timer_set_base(timer, new_base);
                internal_add_timer(new_base, timer);
        }
}

static void migrate_timers(int cpu)
{
        struct tvec_base *old_base;
        struct tvec_base *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_local_var(tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
         */
        spin_lock_irq(&new_base->lock);
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_local_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int timer_cpu_notify(struct notifier_block *self,
                            unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        int err;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                err = init_timers_cpu(cpu);
                if (err < 0)
                        return notifier_from_errno(err);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block timers_nb = {
        .notifier_call  = timer_cpu_notify,
};

void __init init_timers(void)
{
        int err;

        /* ensure there are enough low bits for flags in timer->base pointer */
        BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

        err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                               (void *)(long)smp_processor_id());
        BUG_ON(err != NOTIFY_OK);

        init_timer_stats();
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
        ktime_t kmin;
        unsigned long delta;

        kmin = ktime_set(0, min * NSEC_PER_USEC);
        delta = (max - min) * NSEC_PER_USEC;
        return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
        __set_current_state(TASK_UNINTERRUPTIBLE);
        do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
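
/*
 * Illustrative sketch (not part of the original file): in process context,
 * a driver polling a status register for ~100us should yield to the
 * scheduler rather than busy-wait with udelay(); the min/max range lets
 * the hrtimer coalesce the wakeup. The register accessor and flag names
 * are hypothetical.
 */
#if 0 /* example only */
        while (!(my_readl(dev, MY_STATUS_REG) & MY_STATUS_READY))
                usleep_range(100, 200); /* sleep 100-200us per poll */
#endif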