rtime.felk.cvut.cz Git - can-eth-gw-linux.git/blobdiff - arch/x86/kernel/kvmclock.c
Merge tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[can-eth-gw-linux.git] / arch / x86 / kernel / kvmclock.c
index f1b42b3a186c7ec7594203b0e58c31e424181908..220a360010f8a01ffbcdce4dbd98e12503144f47 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/apic.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/memblock.h>
 
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
@@ -39,7 +40,7 @@ static int parse_no_kvmclock(char *arg)
 early_param("no-kvmclock", parse_no_kvmclock);
 
 /* The hypervisor will put information about time periodically here */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct pvclock_vcpu_time_info, hv_clock);
+static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock wall_clock;
 
 /*
@@ -52,15 +53,20 @@ static unsigned long kvm_get_wallclock(void)
        struct pvclock_vcpu_time_info *vcpu_time;
        struct timespec ts;
        int low, high;
+       int cpu;
 
        low = (int)__pa_symbol(&wall_clock);
        high = ((u64)__pa_symbol(&wall_clock) >> 32);
 
        native_write_msr(msr_kvm_wall_clock, low, high);
 
-       vcpu_time = &get_cpu_var(hv_clock);
+       preempt_disable();
+       cpu = smp_processor_id();
+
+       vcpu_time = &hv_clock[cpu].pvti;
        pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
-       put_cpu_var(hv_clock);
+
+       preempt_enable();
 
        return ts.tv_sec;
 }
@@ -74,9 +80,11 @@ static cycle_t kvm_clock_read(void)
 {
        struct pvclock_vcpu_time_info *src;
        cycle_t ret;
+       int cpu;
 
        preempt_disable_notrace();
-       src = &__get_cpu_var(hv_clock);
+       cpu = smp_processor_id();
+       src = &hv_clock[cpu].pvti;
        ret = pvclock_clocksource_read(src);
        preempt_enable_notrace();
        return ret;
@@ -99,8 +107,15 @@ static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
 static unsigned long kvm_get_tsc_khz(void)
 {
        struct pvclock_vcpu_time_info *src;
-       src = &per_cpu(hv_clock, 0);
-       return pvclock_tsc_khz(src);
+       int cpu;
+       unsigned long tsc_khz;
+
+       /*
+        * hv_clock is now a plain array indexed by CPU number (see the
+        * DEFINE_PER_CPU -> pvclock_vsyscall_time_info* conversion), so
+        * disable preemption to keep smp_processor_id() stable while the
+        * entry is read.
+        */
+       preempt_disable();
+       cpu = smp_processor_id();
+       src = &hv_clock[cpu].pvti;
+       tsc_khz = pvclock_tsc_khz(src);
+       preempt_enable();
+       return tsc_khz;
 }
 
 static void kvm_get_preset_lpj(void)
@@ -119,10 +134,14 @@ bool kvm_check_and_clear_guest_paused(void)
 {
        bool ret = false;
        struct pvclock_vcpu_time_info *src;
+       int cpu = smp_processor_id();
 
-       src = &__get_cpu_var(hv_clock);
+       if (!hv_clock)
+               return ret;
+
+       src = &hv_clock[cpu].pvti;
        if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
-               __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+               src->flags &= ~PVCLOCK_GUEST_STOPPED;
                ret = true;
        }
 
@@ -141,9 +160,10 @@ int kvm_register_clock(char *txt)
 {
        int cpu = smp_processor_id();
        int low, high, ret;
+       struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;
 
-       low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
-       high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
+       low = (int)__pa(src) | 1;
+       high = ((u64)__pa(src) >> 32);
        ret = native_write_msr_safe(msr_kvm_system_time, low, high);
        printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
               cpu, high, low, txt);
@@ -197,6 +217,8 @@ static void kvm_shutdown(void)
 
 void __init kvmclock_init(void)
 {
+       unsigned long mem;
+
        if (!kvm_para_available())
                return;
 
@@ -209,8 +231,18 @@ void __init kvmclock_init(void)
        printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
                msr_kvm_system_time, msr_kvm_wall_clock);
 
-       if (kvm_register_clock("boot clock"))
+       mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
+                            PAGE_SIZE);
+       if (!mem)
+               return;
+       hv_clock = __va(mem);
+
+       if (kvm_register_clock("boot clock")) {
+               hv_clock = NULL;
+               memblock_free(mem,
+                       sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
                return;
+       }
        pv_time_ops.sched_clock = kvm_clock_read;
        x86_platform.calibrate_tsc = kvm_get_tsc_khz;
        x86_platform.get_wallclock = kvm_get_wallclock;
@@ -233,3 +265,37 @@ void __init kvmclock_init(void)
        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
                pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 }
+
+/*
+ * Register the kvmclock pvclock area with the vsyscall time code on
+ * x86-64 (via pvclock_init_vsyscall) and advertise VCLOCK_PVCLOCK as
+ * the clocksource's vDSO clock mode.
+ *
+ * Returns 0 on success (and unconditionally on !CONFIG_X86_64), 1 if
+ * the hypervisor clock lacks PVCLOCK_TSC_STABLE_BIT, or the error
+ * returned by pvclock_init_vsyscall().
+ *
+ * NOTE(review): assumes kvmclock_init() already allocated hv_clock;
+ * hv_clock is dereferenced here without a NULL check — confirm callers
+ * only run this after a successful kvmclock_init().
+ */
+int __init kvm_setup_vsyscall_timeinfo(void)
+{
+#ifdef CONFIG_X86_64
+       int cpu;
+       int ret;
+       u8 flags;
+       struct pvclock_vcpu_time_info *vcpu_time;
+       unsigned int size;
+
+       size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+
+       /* Pin this CPU so the hv_clock[] index stays valid while reading. */
+       preempt_disable();
+       cpu = smp_processor_id();
+
+       vcpu_time = &hv_clock[cpu].pvti;
+       flags = pvclock_read_flags(vcpu_time);
+
+       /*
+        * Only enable the vsyscall path when the hypervisor marks the
+        * clock TSC-stable — presumably so userspace readers migrating
+        * between CPUs cannot observe inconsistent per-CPU clocks.
+        */
+       if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+               preempt_enable();
+               return 1;
+       }
+
+       if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
+               preempt_enable();
+               return ret;
+       }
+
+       preempt_enable();
+
+       /* Tell the vDSO/vsyscall time code to use the pvclock read path. */
+       kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+#endif
+       return 0;
+}