rtime.felk.cvut.cz Git - jailhouse.git/commitdiff
x86: Enhance x86_handle_events to x86_check_events
author Jan Kiszka <jan.kiszka@siemens.com>
Mon, 21 Dec 2015 23:50:36 +0000 (00:50 +0100)
committer Jan Kiszka <jan.kiszka@siemens.com>
Tue, 22 Dec 2015 14:20:22 +0000 (15:20 +0100)
There is now quite some commonality between svm and vmx when it comes to
checking for pending events. Move those parts into x86_check_events,
which becomes the extended version of x86_handle_events. Only a small
difference is now left behind in vmx_check_events(): the preemption
timer has to be disabled before the check.

Just like x86_handle_events, also x86_check_events only works against
the caller's CPU. So remove the cpu_data parameter at this chance.

We can remove the "sipi_vector = -1" after x86_enter_wait_for_sipi now
because we no longer return that value from x86_check_events, and
sipi_vector is not evaluated elsewhere because cpu_data->wait_for_sipi
is true.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
hypervisor/arch/x86/control.c
hypervisor/arch/x86/include/asm/control.h
hypervisor/arch/x86/svm.c
hypervisor/arch/x86/vmx.c

index 3ba7b566e418dbf74f95a10c1be01b208422a464..39c538412a25a15f5fcfd02a4048de4a6ab29acf 100644 (file)
@@ -220,8 +220,9 @@ static void x86_enter_wait_for_sipi(struct per_cpu *cpu_data)
        cpu_data->wait_for_sipi = true;
 }
 
-int x86_handle_events(struct per_cpu *cpu_data)
+void x86_check_events(void)
 {
+       struct per_cpu *cpu_data = this_cpu_data();
        int sipi_vector = -1;
 
        spin_lock(&cpu_data->control_lock);
@@ -229,7 +230,6 @@ int x86_handle_events(struct per_cpu *cpu_data)
        do {
                if (cpu_data->init_signaled && !cpu_data->suspend_cpu) {
                        x86_enter_wait_for_sipi(cpu_data);
-                       sipi_vector = -1;
                        break;
                }
 
@@ -268,12 +268,16 @@ int x86_handle_events(struct per_cpu *cpu_data)
 
        /* wait_for_sipi is only modified on this CPU, so checking outside of
         * control_lock is fine */
-       if (cpu_data->wait_for_sipi)
+       if (cpu_data->wait_for_sipi) {
                vcpu_park();
-       else if (sipi_vector >= 0)
+       } else if (sipi_vector >= 0) {
+               printk("CPU %d received SIPI, vector %x\n", this_cpu_id(),
+                      sipi_vector);
                apic_clear();
+               vcpu_reset(sipi_vector);
+       }
 
-       return sipi_vector;
+       iommu_check_pending_faults();
 }
 
 void __attribute__((noreturn))
index b95fdb4e4efc00d42cf53300f79a08ecef0b1b18..433d76b140e0c02c199785fee5f50ef798c6d721 100644 (file)
@@ -19,7 +19,7 @@ enum x86_init_sipi { X86_INIT, X86_SIPI };
 void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
                        int sipi_vector);
 
-int x86_handle_events(struct per_cpu *cpu_data);
+void x86_check_events(void);
 
 void __attribute__((noreturn))
 x86_exception_handler(struct exception_frame *frame);
index d9b375c9d857f30fc37a0d38cfb78ae277c3b069..d45627e922231dff05026a752061fd09fa4f5eb7 100644 (file)
@@ -851,7 +851,6 @@ void vcpu_handle_exit(struct per_cpu *cpu_data)
 {
        struct vmcb *vmcb = &cpu_data->vmcb;
        bool res = false;
-       int sipi_vector;
 
        vmcb->gs.base = read_msr(MSR_GS_BASE);
 
@@ -874,13 +873,7 @@ void vcpu_handle_exit(struct per_cpu *cpu_data)
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
                /* Temporarily enable GIF to consume pending NMI */
                asm volatile("stgi; clgi" : : : "memory");
-               sipi_vector = x86_handle_events(cpu_data);
-               if (sipi_vector >= 0) {
-                       printk("CPU %d received SIPI, vector %x\n",
-                              cpu_data->cpu_id, sipi_vector);
-                       vcpu_reset(sipi_vector);
-               }
-               iommu_check_pending_faults();
+               x86_check_events();
                goto vmentry;
        case VMEXIT_VMMCALL:
                vcpu_handle_hypercall();
index 6e3bc6a087480bde280f0c3172ab785a7c19c46a..ba0159c3f6a9632719bca18542258938277647fc 100644 (file)
@@ -887,6 +887,12 @@ void vcpu_skip_emulated_instruction(unsigned int inst_len)
        vmcs_write64(GUEST_RIP, vmcs_read64(GUEST_RIP) + inst_len);
 }
 
+static void vmx_check_events(void)
+{
+       vmx_preemption_timer_set_enable(false);
+       x86_check_events();
+}
+
 static void update_efer(void)
 {
        unsigned long efer = vmcs_read64(GUEST_IA32_EFER);
@@ -1045,7 +1051,6 @@ void vcpu_vendor_get_mmio_intercept(struct vcpu_mmio_intercept *mmio)
 void vcpu_handle_exit(struct per_cpu *cpu_data)
 {
        u32 reason = vmcs_read32(VM_EXIT_REASON);
-       int sipi_vector;
 
        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
 
@@ -1055,14 +1060,7 @@ void vcpu_handle_exit(struct per_cpu *cpu_data)
                /* fall through */
        case EXIT_REASON_PREEMPTION_TIMER:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
-               vmx_preemption_timer_set_enable(false);
-               sipi_vector = x86_handle_events(cpu_data);
-               if (sipi_vector >= 0) {
-                       printk("CPU %d received SIPI, vector %x\n",
-                              cpu_data->cpu_id, sipi_vector);
-                       vcpu_reset(sipi_vector);
-               }
-               iommu_check_pending_faults();
+               vmx_check_events();
                return;
        case EXIT_REASON_CPUID:
                vcpu_handle_cpuid();