cpu_data->wait_for_sipi = true;
}
-int x86_handle_events(struct per_cpu *cpu_data)
+/*
+ * Consume pending INIT/SIPI events for the calling CPU: enter wait-for-SIPI
+ * state on INIT, park the vCPU while waiting, and reset the vCPU once a
+ * SIPI vector has arrived. Finishes by checking for pending IOMMU faults.
+ * Replaces the former x86_handle_events(), folding the SIPI reset that
+ * every caller used to perform into this single function.
+ */
+void x86_check_events(void)
{
+ struct per_cpu *cpu_data = this_cpu_data();
int sipi_vector = -1;
spin_lock(&cpu_data->control_lock);
do {
if (cpu_data->init_signaled && !cpu_data->suspend_cpu) {
x86_enter_wait_for_sipi(cpu_data);
- sipi_vector = -1;
break;
}
/* wait_for_sipi is only modified on this CPU, so checking outside of
* control_lock is fine */
- if (cpu_data->wait_for_sipi)
+ if (cpu_data->wait_for_sipi) {
vcpu_park();
- else if (sipi_vector >= 0)
+ } else if (sipi_vector >= 0) {
+ printk("CPU %d received SIPI, vector %x\n", this_cpu_id(),
+ sipi_vector);
apic_clear();
+ /* vcpu_reset() was previously done by each caller of
+ * x86_handle_events(); now centralized here */
+ vcpu_reset(sipi_vector);
+ }
- return sipi_vector;
+ iommu_check_pending_faults();
}
void __attribute__((noreturn))
void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
int sipi_vector);
-int x86_handle_events(struct per_cpu *cpu_data);
+/* Process pending INIT/SIPI events and IOMMU faults on the calling CPU. */
+void x86_check_events(void);
void __attribute__((noreturn))
x86_exception_handler(struct exception_frame *frame);
{
struct vmcb *vmcb = &cpu_data->vmcb;
bool res = false;
- int sipi_vector;
vmcb->gs.base = read_msr(MSR_GS_BASE);
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
/* Temporarily enable GIF to consume pending NMI */
asm volatile("stgi; clgi" : : : "memory");
- sipi_vector = x86_handle_events(cpu_data);
- if (sipi_vector >= 0) {
- printk("CPU %d received SIPI, vector %x\n",
- cpu_data->cpu_id, sipi_vector);
- vcpu_reset(sipi_vector);
- }
- iommu_check_pending_faults();
+ /* SIPI reporting/reset and the IOMMU fault check are now
+ * performed inside x86_check_events() */
+ x86_check_events();
goto vmentry;
case VMEXIT_VMMCALL:
vcpu_handle_hypercall();
vmcs_write64(GUEST_RIP, vmcs_read64(GUEST_RIP) + inst_len);
}
+/*
+ * VMX-specific wrapper around x86_check_events(): first disarm the VMX
+ * preemption timer (armed only to force this event check), then process
+ * pending events.
+ */
+static void vmx_check_events(void)
+{
+ vmx_preemption_timer_set_enable(false);
+ x86_check_events();
+}
+
static void update_efer(void)
{
unsigned long efer = vmcs_read64(GUEST_IA32_EFER);
void vcpu_handle_exit(struct per_cpu *cpu_data)
{
u32 reason = vmcs_read32(VM_EXIT_REASON);
- int sipi_vector;
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
/* fall through */
case EXIT_REASON_PREEMPTION_TIMER:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
- vmx_preemption_timer_set_enable(false);
- sipi_vector = x86_handle_events(cpu_data);
- if (sipi_vector >= 0) {
- printk("CPU %d received SIPI, vector %x\n",
- cpu_data->cpu_id, sipi_vector);
- vcpu_reset(sipi_vector);
- }
- iommu_check_pending_faults();
+ /* timer disable, SIPI handling and IOMMU fault check are
+ * consolidated in vmx_check_events() */
+ vmx_check_events();
return;
case EXIT_REASON_CPUID:
vcpu_handle_cpuid();