svm_segment->base = segment->base;
}
-static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
+static bool svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
/* No real need for this function; used for consistency with vmx.c */
vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
/* Explicitly mark all of the state as new */
vmcb->clean_bits = 0;
- return vcpu_set_cell_config(cpu_data->cell, vmcb);
+ return svm_set_cell_config(cpu_data->cell, vmcb);
}
unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
__builtin_unreachable();
}
-static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
+static void svm_vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
struct vmcb *vmcb = &cpu_data->vmcb;
unsigned long val;
/* Almost all of the guest state changed */
vmcb->clean_bits = 0;
- ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);
+ ok &= svm_set_cell_config(cpu_data->cell, vmcb);
/* This is always false, but to be consistent with vmx.c... */
if (!ok) {
panic_printk("EFER: %p\n", vmcb->efer);
}
-static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
- struct vcpu_pf_intercept *out)
+static void svm_get_vcpu_pf_intercept(struct per_cpu *cpu_data,
+ struct vcpu_pf_intercept *out)
{
struct vmcb *vmcb = &cpu_data->vmcb;
out->is_write = !!(vmcb->exitinfo1 & 0x2);
}
-static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
- struct vcpu_io_intercept *out)
+static void svm_get_vcpu_io_intercept(struct per_cpu *cpu_data,
+ struct vcpu_io_intercept *out)
{
struct vmcb *vmcb = &cpu_data->vmcb;
u64 exitinfo = vmcb->exitinfo1;
if (sipi_vector >= 0) {
printk("CPU %d received SIPI, vector %x\n",
cpu_data->cpu_id, sipi_vector);
- vcpu_reset(cpu_data, sipi_vector);
+ svm_vcpu_reset(cpu_data, sipi_vector);
memset(guest_regs, 0, sizeof(*guest_regs));
}
iommu_check_pending_faults(cpu_data);
} else {
/* General MMIO (IOAPIC, PCI etc) */
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
- vcpu_vendor_get_pf_intercept(cpu_data, &pf);
+ svm_get_vcpu_pf_intercept(cpu_data, &pf);
if (vcpu_handle_pt_violation(guest_regs, &pf))
return;
}
break;
case VMEXIT_IOIO:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
- vcpu_vendor_get_io_intercept(cpu_data, &io);
+ svm_get_vcpu_io_intercept(cpu_data, &io);
if (vcpu_handle_io_access(guest_regs, &io))
return;
break;
{
struct vmcb *vmcb = &cpu_data->vmcb;
- vcpu_reset(cpu_data, APIC_BSP_PSEUDO_SIPI);
+ svm_vcpu_reset(cpu_data, APIC_BSP_PSEUDO_SIPI);
/* No need to clear VMCB Clean bit: svm_vcpu_reset() already does this */
vmcb->n_cr3 = paging_hvirt2phys(parked_mode_npt);
return ok;
}
-static bool vcpu_set_cell_config(struct cell *cell)
+static bool vmx_set_cell_config(struct cell *cell)
{
u8 *io_bitmap;
bool ok = true;
ok &= vmcs_write64(APIC_ACCESS_ADDR,
paging_hvirt2phys(apic_access_page));
- ok &= vcpu_set_cell_config(cpu_data->cell);
+ ok &= vmx_set_cell_config(cpu_data->cell);
ok &= vmcs_write32(EXCEPTION_BITMAP, 0);
__builtin_unreachable();
}
-static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
+static void vmx_vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
unsigned long val;
bool ok = true;
val &= ~VM_ENTRY_IA32E_MODE;
ok &= vmcs_write32(VM_ENTRY_CONTROLS, val);
- ok &= vcpu_set_cell_config(cpu_data->cell);
+ ok &= vmx_set_cell_config(cpu_data->cell);
if (!ok) {
panic_printk("FATAL: CPU reset failed\n");
void vcpu_park(struct per_cpu *cpu_data)
{
- vcpu_reset(cpu_data, 0);
+ vmx_vcpu_reset(cpu_data, 0);
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_HLT);
}
panic_printk("EFER: %p\n", vmcs_read64(GUEST_IA32_EFER));
}
-static void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *out)
+static void vmx_get_vcpu_io_intercept(struct vcpu_io_intercept *out)
{
u64 exitq = vmcs_read64(EXIT_QUALIFICATION);
out->rep_or_str = !!(exitq & 0x30);
}
-static void vcpu_vendor_get_pf_intercept(struct vcpu_pf_intercept *out)
+static void vmx_get_vcpu_pf_intercept(struct vcpu_pf_intercept *out)
{
u64 exitq = vmcs_read64(EXIT_QUALIFICATION);
if (sipi_vector >= 0) {
printk("CPU %d received SIPI, vector %x\n",
cpu_data->cpu_id, sipi_vector);
- vcpu_reset(cpu_data, sipi_vector);
+ vmx_vcpu_reset(cpu_data, sipi_vector);
memset(guest_regs, 0, sizeof(*guest_regs));
}
iommu_check_pending_faults(cpu_data);
break;
case EXIT_REASON_IO_INSTRUCTION:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
- vcpu_vendor_get_io_intercept(&io);
+ vmx_get_vcpu_io_intercept(&io);
if (vcpu_handle_io_access(guest_regs, &io))
return;
break;
case EXIT_REASON_EPT_VIOLATION:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
- vcpu_vendor_get_pf_intercept(&pf);
+ vmx_get_vcpu_pf_intercept(&pf);
if (vcpu_handle_pt_violation(guest_regs, &pf))
return;
break;