 * linux/arch/x86-64/kernel/time.c
 * "High Precision Event Timer" based timekeeping.
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);

extern void i8254_timer_resume(void);
extern int using_apic_timer;

DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS
unsigned int cpu_khz; /* CPU clock frequency in kHz (TSC ticks per msec) */
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks; /* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
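/*
 * The __section_* variables above are copies of the kernel's time state that
 * are placed in a page also mapped read-only at the fixed vsyscall address,
 * so a userspace vsyscall gettimeofday() can read them without entering the
 * kernel (roughly: offset_us = ((rdtsc() - last_tsc) * tsc_quot) >> 32, added
 * to __xtime). Illustrative only; the real user-side reader lives in
 * arch/x86_64/kernel/vsyscall.c.
 */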
static inline void rdtscll_sync(unsigned long *tsc)

 * do_gettimeoffset() returns microseconds since the last timer interrupt was
 * triggered by hardware. A memory read of the HPET is slower than a register
 * read of the TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't been updated either. They are
 * bound together by xtime_lock.
static inline unsigned int do_gettimeoffset_tsc(void)
	if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;

static inline unsigned int do_gettimeoffset_hpet(void)
	return ((hpet_readl(HPET_COUNTER) - vxtime.last) * vxtime.quot) >> 32;

unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
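/*
 * Both offset functions use the same 32.32 fixed-point scheme: tsc_quot is
 * (1000 << 32) / cpu_khz (microseconds per TSC tick, scaled by 2^32) and quot
 * is (1000000 << 32) / vxtime_hz for the HPET. Worked example with an assumed
 * cpu_khz of 2000000 (2 GHz): tsc_quot = (1000 << 32) / 2000000 = 2147483, so
 * 2000000 elapsed ticks (one millisecond) give (2000000 * 2147483) >> 32 =
 * 999 microseconds, the off-by-one coming from truncating the quotient.
 */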
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818

void do_gettimeofday(struct timeval *tv)
	unsigned long seq, t;
	unsigned int sec, usec;

		seq = read_seqbegin(&xtime_lock);

		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But that didn't work for me: the clock is non-monotonic
		   with NTP anyway.
		   I dropped all corrections now until a real solution can
		   be found. Note when you fix it here you need to do the same
		   in arch/x86_64/kernel/vsyscall.c and export all needed
		   variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;

EXPORT_SYMBOL(do_gettimeofday);
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for

int do_settimeofday(struct timespec *tv)
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0; /* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irq(&xtime_lock);

EXPORT_SYMBOL(do_settimeofday);
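/*
 * The wtm_sec/wtm_nsec adjustment in do_settimeofday() keeps the sum
 * xtime + wall_to_monotonic constant: whatever is added to xtime is
 * subtracted from wall_to_monotonic, so the monotonic clock does not jump
 * when wall time is set.
 */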
unsigned long profile_pc(struct pt_regs *regs)
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a single word.
	   This checks if the address on the stack looks like a kernel text address.
	   There is a small window for false hits, but in that case the tick
	   is just accounted to the spinlock function.
	   Better would be to write these functions in assembler again
	   and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];

EXPORT_SYMBOL(profile_pc);
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data

static void set_rtc_mmss(unsigned long nowtime)
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

	 * IRQs are disabled when we're called from the timer interrupt,
	 * no need for spin_lock_irqsave()

	spin_lock(&rtc_lock);

	 * Tell the clock it's being set and stop it.

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

	 * since we're only adjusting minutes and seconds, don't interfere with hour
	 * overflow. This avoids messing with unknown time zones but requires your RTC
	 * not to be off by more than 15 minutes. Since we're calling it only when
	 * our clock is externally synchronized using NTP, this shouldn't be a problem.

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30; /* correct for half hour time zone */
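	/*
	 * Worked example for the half-hour check above: if the CMOS clock is on
	 * a zone offset by 30 minutes (say cmos_minutes = 42 while real_minutes
	 * = 12), abs(12 - 42) = 30 and (30 + 15) / 30 = 1, which is odd, so 30
	 * is added before writing the minutes back. A small drift such as
	 * abs(delta) = 2 gives (2 + 15) / 30 = 0 and no half-hour shift.
	 */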
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);

	BIN_TO_BCD(real_seconds);
	BIN_TO_BCD(real_minutes);
	CMOS_WRITE(real_seconds, RTC_SECONDS);
	CMOS_WRITE(real_minutes, RTC_MINUTES);

	 * The following flags have to be released exactly in this order, otherwise the
	 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
	 * not reset the oscillator and will not update precisely 500 ms later. You
	 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
	 * believes data sheets anyway ... -- Markus Kuhn

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_T0_CMP) - hpet_tick;

		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC / HZ) / hpet_tick;
		return base + offset;

		seq = read_seqbegin(&xtime_lock);

		last_offset = vxtime.last_tsc;
		base = monotonic_base;
	} while (read_seqretry(&xtime_lock, seq));

	rdtscll(this_offset);
	offset = (this_offset - last_offset) * 1000 / cpu_khz;
	return base + offset;

EXPORT_SYMBOL(monotonic_clock);
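/*
 * Units: monotonic_base is kept in nanoseconds. In TSC mode the timer
 * interrupt adds tsc_delta * 1000000 / cpu_khz, and 1000000 / cpu_khz is
 * nanoseconds per TSC tick (0.5 ns/tick at an assumed cpu_khz of 2000000).
 * In HPET mode the counter delta is scaled by (NSEC_PER_SEC/HZ) / hpet_tick,
 * i.e. nanoseconds-per-jiffy divided by HPET-clocks-per-jiffy, which is
 * nanoseconds per HPET clock.
 */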
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
	static long lost_count;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer "
		print_symbol("rip %s)\n", regs->rip);

	if (lost_count == 1000 && !warned) {
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
				"some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		/* else should fall back to PIT, but code missing. */

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count+1) % 25 == 0) {
		cpufreq_delayed_get();
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	static unsigned long rtc_update = 0;
	int delay, offset = 0, lost = 0;

	 * Here we are in the timer irq handler. We have irqs locally disabled (so we
	 * don't need spin_lock_irqsave()), but we don't know if the timer_bh is running
	 * on the other CPU, so we need a lock. We also need to lock the vsyscall
	 * variables, because both do_timer() and we change them -arca+vojtech

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address) {
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
		spin_lock(&i8253_lock);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick) {
			lost = (offset - vxtime.last) / hpet_tick - 1;

			(offset - vxtime.last) * (NSEC_PER_SEC / HZ) / hpet_tick;

		vxtime.last = offset;

		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;

	handle_lost_ticks(lost, regs);

	 * Do the timer stuff.

	update_process_times(user_mode(regs));

	 * In the SMP case we use the local APIC timer interrupt to do the profiling,
	 * except when we simulate SMP mode on a uniprocessor system; in that case we
	 * have to call the local interrupt handler.

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);

	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);

	 * If we have an externally synchronized Linux clock, then update the CMOS clock
	 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
	 * closest to exactly 500 ms before the next second. If the update fails, we
	 * don't care, as it'll be updated on the next turn, and the problem (time way
	 * off) isn't likely to go away much sooner anyway.

	if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;

	write_sequnlock(&xtime_lock);
static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
	cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
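/*
 * Worked example for cycles_2_ns(), with an assumed 2000 MHz CPU:
 * cyc2ns_scale = (1000 << 10) / 2000 = 512, so cycles_2_ns(c) =
 * (c * 512) >> 10 = c / 2, i.e. 0.5 ns per cycle. Keeping the scale as a
 * 2^10 fixed-point fraction avoids a 64-bit divide in sched_clock().
 */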
unsigned long long sched_clock(void)
	/* Don't do an HPET read here. Using the TSC is always much faster,
	   and the HPET may not be mapped yet when the scheduler first runs.
	   The disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	   which means it is not completely exact and may not be monotonic between
	   CPUs. But the errors should be too small to matter for scheduling
	return cycles_2_ns(a);
unsigned long get_cmos_time(void)
	unsigned int timeout, year, mon, day, hour, min, sec;
	unsigned char last, this;

	 * The Linux interpretation of the CMOS clock register contents: When the
	 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
	 * second which has precisely just started. Waiting for this can take up to 1
	 * second; we time out after approximately 2.4 seconds on a machine with a
	 * standard 8.3 MHz ISA bus.

	spin_lock_irqsave(&rtc_lock, flags);

	while (timeout && last && !this) {
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;

	 * Here we are safe to assume the registers won't change for a whole second, so
	 * we just go ahead and read them.

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

	 * We know that x86-64 always uses BCD format, no need to check the config.
	 * x86-64 systems have only existed since 2002.
	 * This will work up to Dec 31, 2100.

	return mktime(year, mon, day, hour, min, sec);
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the CPU frequency
   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
	for_each_online_cpu(cpu) {
	cpufreq_delayed_issched = 0;

/* If we notice lost ticks, schedule a call to cpufreq_get(), which verifies
 * that the CPU frequency the timing core thinks the CPU is running at is
 * still correct.
 */
static void cpufreq_delayed_get(void)
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
		schedule_work(&cpufreq_delayed_get_work);
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
		lpj = &boot_cpu_data.loops_per_jiffy;

		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;

	set_cyc2ns_scale(cpu_khz_ref / 1000);
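	/*
	 * cpufreq_scale() is a plain ratio: new_val = val * new_freq / ref_freq.
	 * With assumed numbers, a CPU calibrated at ref_freq = 2000000 kHz that
	 * throttles to 1000000 kHz ends up with cpu_khz = 1000000 and tsc_quot
	 * recomputed as (1000 << 32) / 1000000, so TSC deltas keep being
	 * converted to microseconds correctly at the new clock rate.
	 */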
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
static int __init cpufreq_tsc(void)
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))

core_initcall(cpufreq_tsc);
 * hpet_calibrate_tsc() calibrates the processor TSC in a very simple way,
 * comparing it to the HPET timer of known frequency.

#define TICK_COUNT 100000000

static unsigned int __init hpet_calibrate_tsc(void)
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;

	local_irq_save(flags);

	hpet_start = hpet_readl(HPET_COUNTER);

		hpet_now = hpet_readl(HPET_COUNTER);

		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
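/*
 * Unit check for the division above: hpet_period is femtoseconds per HPET
 * clock, so (hpet_now - hpet_start) * hpet_period / 1000 is the elapsed time
 * in picoseconds, and tsc_delta * 1000000000 / elapsed_ps is TSC ticks per
 * millisecond, which is exactly what cpu_khz means. The loop runs until
 * either counter has advanced by TICK_COUNT; on an assumed 2 GHz part the
 * TSC gets there first, after about 50 ms, which is ample integration time.
 */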
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * whereas we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.

static unsigned int __init pit_calibrate_tsc(void)
	unsigned long start, end;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);

	while ((inb(0x61) & 0x20) == 0);

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
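/*
 * How the 50 ms gate above works: PIT channel 2 is loaded with
 * PIT_TICK_RATE / (1000 / 50) = 1193182 / 20 = 59659 counts, i.e. 50 ms at
 * the 1.193182 MHz PIT clock. The TSC is read before and after the gate, so
 * (end - start) is TSC ticks per 50 ms and dividing by 50 gives ticks per
 * millisecond, which is cpu_khz.
 */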
static __init int late_hpet_init(void)
	if (!vxtime.hpet_address)

	memset(&hd, 0, sizeof (hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

	 * Register with driver.
	 * Timer0 and Timer1 are used by the platform.

	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

		struct hpet_timer *timer;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
						Tn_INT_ROUTE_CNF_SHIFT;

fs_initcall(late_hpet_init);
static int hpet_timer_stop_set_go(unsigned long tick)
	 * Stop the timers and reset the main counter.

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

	 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
	 * and period also hpet_tick.

	hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
	hpet_writel(hpet_tick, HPET_T0_CMP);
	hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
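	/*
	 * The double write above appears intentional: per the HPET spec, with
	 * HPET_TN_SETVAL set on a periodic timer the first comparator write
	 * programs the next match value and the second one programs the period
	 * that is added after each interrupt; both are hpet_tick here.
	 */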
	cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
static int hpet_init(void)
	if (!vxtime.hpet_address)

	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

	 * Read the period, compute tick and quotient.

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER) ||
	    !(id & HPET_ID_LEGSUP))

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /

	return hpet_timer_stop_set_go(hpet_tick);
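/*
 * hpet_tick computed in hpet_init() above is HPET clocks per kernel tick:
 * hpet_period is femtoseconds per HPET clock and USEC_PER_SEC / HZ is
 * microseconds per tick, so (10^9 fs/us * us/tick + period/2) / period
 * rounds to the nearest whole HPET clock. For an assumed 14.318 MHz HPET
 * (period of roughly 69841279 fs) and HZ=1000 this is about 14318 clocks
 * per tick.
 */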
static int hpet_reenable(void)
	return hpet_timer_stop_set_go(hpet_tick);
void __init pit_init(void)
	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
	outb_p(LATCH & 0xff, 0x40);	/* LSB */
	outb_p(LATCH >> 8, 0x40);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
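/*
 * LATCH used in pit_init() above is (CLOCK_TICK_RATE + HZ/2) / HZ, the PIT
 * reload value that makes channel 0 fire every 1/HZ seconds; mode 2 (rate
 * generator) reloads the counter automatically, so with an assumed HZ of
 * 1000 the latch is about 1193 and the timer interrupt arrives roughly
 * every millisecond.
 */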
int __init time_setup(char *str)
	report_lost_ticks = 1;

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL

extern void __init config_acpi_tables(void);
void __init time_init(void)
#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);

	vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

		vxtime_hz = (1000000000000000L + hpet_period / 2) /
		cpu_khz = hpet_calibrate_tsc();
		cpu_khz = pit_calibrate_tsc();

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.mode = VXTIME_TSC;
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.hz = vxtime_hz;
	rdtscll_sync(&vxtime.last_tsc);

	set_cyc2ns_scale(cpu_khz / 1000);
void __init time_init_smp(void)
	 * AMD systems with more than one CPU don't have fully synchronized
	 * TSCs. Always use HPET gettimeofday for these, although it is slower.
	 * Intel SMP systems usually have synchronized TSCs, so use always
	 * IBM Summit2 checked by oem_force_hpet_timer().
	 * AMD dual core may also not need HPET. Check me.
	 * Can be turned off with "notsc".

	if (num_online_cpus() > 1 &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)

	/* Some systems will want to disable TSC and use HPET. */
	if (oem_force_hpet_timer())

	if (vxtime.hpet_address && notsc) {
		vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;

		timetype = vxtime.hpet_address ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;

	printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);

__setup("report_lost_ticks", time_setup);
static long clock_cmos_diff;
static unsigned long sleep_start;

static int timer_suspend(struct sys_device *dev, u32 state)
	 * Estimate time zone so that set_time can update the clock
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;

static int timer_resume(struct sys_device *dev)
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.cls = &timer_sysclass,

static int time_init_device(void)
	int error = sysdev_class_register(&timer_sysclass);
	error = sysdev_register(&device_timer);

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);

#define DEFAULT_RTC_INT_FREQ 64
#define RTC_NUM_INTS 1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */

int is_hpet_enabled(void)
	return vxtime.hpet_address != 0;
 * Timer 1 is used for the RTC; we do not use its periodic interrupt feature,
 * even though the HPET supports periodic interrupts on Timer 1.
 * The reason is that to set up a periodic interrupt in the HPET, we need to
 * stop the main counter. And if we did that every time someone disables/enables
 * the RTC, we would adversely affect the main kernel timer running on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 * hpet_rtc_timer_init() is called the first time; during subsequent
 * interrupts, reinit happens through hpet_rtc_timer_reinit().

int hpet_rtc_timer_init(void)
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())

	 * Set up counter 1 and enable the interrupts.

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += (hpet_tick * HZ) / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);
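	/*
	 * hpet_tick * HZ is HPET clocks per second, so hpet_tick * HZ /
	 * hpet_rtc_int_freq above is the number of HPET clocks between emulated
	 * RTC interrupts; with an assumed 14.318 MHz HPET and the default 64 Hz
	 * polling rate that is roughly 223722 clocks, i.e. one interrupt every
	 * 15.625 ms.
	 */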
static void hpet_rtc_timer_reinit(void)
	unsigned int cfg, cnt;

	if (!(PIE_on | AIE_on | UIE_on))

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than current count. */
	cnt = hpet_readl(HPET_T1_CMP);
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);
 * The functions below are called from the rtc driver.
 * Return 0 if the HPET is not being used.
 * Otherwise do the necessary changes and return 1.

int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
	if (!is_hpet_enabled())

	if (bit_mask & RTC_UIE)
	if (bit_mask & RTC_PIE)
	if (bit_mask & RTC_AIE)

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE) {
	if (bit_mask & RTC_PIE) {
	if (bit_mask & RTC_AIE) {

	if (timer_init_reqd)
		hpet_rtc_timer_init();
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
	if (!is_hpet_enabled())

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

int hpet_set_periodic_freq(unsigned long freq)
	if (!is_hpet_enabled())

int hpet_rtc_dropped_irq(void)
	if (!is_hpet_enabled())

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on) {
		rtc_get_rtc_time(&curr_time);

		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;

		if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
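		/*
		 * PIE downsampling in the check above: the emulated timer runs at
		 * hpet_rtc_int_freq, which is max(PIE_freq, 64 Hz), so a consumer
		 * asking for a lower periodic rate only gets an RTC_PF event once
		 * every hpet_rtc_int_freq / PIE_freq expiries; e.g. with PIE_freq =
		 * 16 Hz and the 64 Hz base rate, every 4th expiry raises the flag.
		 */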
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;

	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);

static int __init nohpet_setup(char *s)

__setup("nohpet", nohpet_setup);

static int __init notsc_setup(char *s)

__setup("notsc", notsc_setup);