*/
#include <jailhouse/entry.h>
+#include <jailhouse/cell.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
+#include <asm/amd_iommu.h>
#include <asm/apic.h>
-#include <asm/cell.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
* combinations of NW and CD bits are prohibited by SVM (see APMv2,
* Sect. 15.5). To handle this, we always keep the NW bit off.
*/
-#define SVM_CR0_CLEARED_BITS ~X86_CR0_NW
+#define SVM_CR0_ALLOWED_BITS (~X86_CR0_NW)
+
+/* IOPM size: two 4-K pages + 3 bits, allocated as three full pages */
+#define IOPM_PAGES 3
+
+#define NPT_IOMMU_PAGE_DIR_LEVELS 4
static bool has_avic, has_assists, has_flush_by_asid;
static const struct segment invalid_seg;
-static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
+static struct paging npt_iommu_paging[NPT_IOMMU_PAGE_DIR_LEVELS];
+/* bit cleared: direct access allowed */
+/* TODO: convert to whitelist */
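+/*
+ * Each MSR is covered by two consecutive bits: the lower bit intercepts
+ * reads, the upper bit intercepts writes, so one byte describes four MSRs
+ * (see APMv2, Sect. 15.11).
+ */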
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
[ SVM_MSRPM_0000 ] = {
[ 0/4 ... 0x017/4 ] = 0,
[ 0x018/4 ... 0x01b/4 ] = 0x80, /* 0x01b (w) */
- [ 0x01c/4 ... 0x7ff/4 ] = 0,
+ [ 0x01c/4 ... 0x1ff/4 ] = 0,
+ [ 0x200/4 ... 0x273/4 ] = 0xaa, /* 0x200 - 0x273 (w) */
+ [ 0x274/4 ... 0x277/4 ] = 0xea, /* 0x274 - 0x276 (w), 0x277 (rw) */
+ [ 0x278/4 ... 0x2fb/4 ] = 0,
+ [ 0x2fc/4 ... 0x2ff/4 ] = 0x80, /* 0x2ff (w) */
+ [ 0x300/4 ... 0x7ff/4 ] = 0,
/* x2APIC MSRs - emulated if not present */
[ 0x800/4 ... 0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
[ 0x804/4 ... 0x807/4 ] = 0,
}
};
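+
+/*
+ * A parked CPU is reset and switched to a private NPT (parked_mode_npt)
+ * that maps only this page at guest-physical 0x000ff000, so it resumes
+ * execution in the cli/hlt loop below (see vcpu_park()).
+ */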
+/* This page is mapped so the code begins at 0x000ffff0 */
+static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
+ [0xff0] = 0xfa, /* 1: cli */
+ [0xff1] = 0xf4, /* hlt */
+ [0xff2] = 0xeb,
+ [0xff3] = 0xfc /* jmp 1b */
+};
+
+static void *parked_mode_npt;
+
static void *avic_page;
static int svm_check_features(void)
{
/* SVM is available */
- if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
- return -ENODEV;
+ if (!(cpuid_ecx(0x80000001, 0) & X86_FEATURE_SVM))
+ return trace_error(-ENODEV);
/* Nested paging */
- if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
- return -EIO;
+ if (!(cpuid_edx(0x8000000A, 0) & X86_FEATURE_NP))
+ return trace_error(-EIO);
/* Decode assists */
- if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
+ if ((cpuid_edx(0x8000000A, 0) & X86_FEATURE_DECODE_ASSISTS))
has_assists = true;
/* AVIC support */
- if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
- has_avic = true;
+ /* FIXME: Jailhouse support is incomplete so far
+ if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_AVIC)
+ has_avic = true; */
/* TLB Flush by ASID support */
- if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
+ if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_FLUSH_BY_ASID)
has_flush_by_asid = true;
return 0;
static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
const struct desc_table_reg *dtr)
{
- struct svm_segment tmp = { 0 };
-
- if (dtr) {
- tmp.base = dtr->base;
- tmp.limit = dtr->limit & 0xffff;
- }
-
- *svm_segment = tmp;
+ svm_segment->base = dtr->base;
+ svm_segment->limit = dtr->limit & 0xffff;
}
-/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
const struct segment *segment)
{
- u32 ar;
-
svm_segment->selector = segment->selector;
-
- if (segment->access_rights == 0x10000) {
- svm_segment->access_rights = 0;
- } else {
- ar = segment->access_rights;
- svm_segment->access_rights =
- ((ar & 0xf000) >> 4) | (ar & 0x00ff);
- }
-
+ svm_segment->access_rights = ((segment->access_rights & 0xf000) >> 4) |
+ (segment->access_rights & 0x00ff);
svm_segment->limit = segment->limit;
svm_segment->base = segment->base;
}
-static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
+static void svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
- /* No real need for this function; used for consistency with vmx.c */
- vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
- vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);
-
- return true;
+ vmcb->iopm_base_pa = paging_hvirt2phys(cell->arch.svm.iopm);
+ vmcb->n_cr3 =
+ paging_hvirt2phys(cell->arch.svm.npt_iommu_structs.root_table);
}
-static int vmcb_setup(struct per_cpu *cpu_data)
+static void vmcb_setup(struct per_cpu *cpu_data)
{
struct vmcb *vmcb = &cpu_data->vmcb;
memset(vmcb, 0, sizeof(struct vmcb));
- vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
+ vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
vmcb->cr3 = cpu_data->linux_cr3;
- vmcb->cr4 = read_cr4();
+ vmcb->cr4 = cpu_data->linux_cr4;
set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
+ set_svm_segment_from_segment(&vmcb->ldtr, &invalid_seg);
- set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
/* Make the hypervisor visible */
vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
- /* Linux uses custom PAT setting */
- vmcb->g_pat = read_msr(MSR_IA32_PAT);
+ vmcb->g_pat = cpu_data->pat;
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
- /* TODO: Do we need this for SVM ? */
- /* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
+ vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID;
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
+ /*
+ * We only intercept #DB and #AC to prevent malicious guests from
+ * triggering infinite loops in microcode (see e.g. CVE-2015-5307 and
+ * CVE-2015-8104).
+ */
+ vmcb->exception_intercepts |= (1 << DB_VECTOR) | (1 << AC_VECTOR);
+
vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
vmcb->np_enable = 1;
/* TODO: Setup AVIC */
- return vcpu_set_cell_config(cpu_data->cell, vmcb);
+ /* Explicitly mark all of the state as new */
+ vmcb->clean_bits = 0;
+
+ svm_set_cell_config(cpu_data->cell, vmcb);
}
unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
unsigned long gphys,
unsigned long flags)
{
- return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
- gphys, flags);
+ return paging_virt2phys(&cpu_data->cell->arch.svm.npt_iommu_structs,
+ gphys, flags);
+}
+
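+/*
+ * The cell's NPT doubles as its AMD IOMMU page table. The IOMMU-specific
+ * bits set by the handlers below (page mode / next level, IR, IW) occupy
+ * entry positions the CPU either ignores or interprets compatibly, so both
+ * walkers can share the same entries.
+ */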
+static void npt_iommu_set_next_pt_l4(pt_entry_t pte, unsigned long next_pt)
+{
+ /*
+ * Merge IOMMU and NPT flags. We need to mark the NPT entries as user
+ * accessible, see APMv2, Section 15.25.5.
+ */
+ *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(3) |
+ AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
+}
+
+static void npt_iommu_set_next_pt_l3(pt_entry_t pte, unsigned long next_pt)
+{
+ *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(2) |
+ AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
}
-static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
+static void npt_iommu_set_next_pt_l2(pt_entry_t pte, unsigned long next_pt)
{
- /* See APMv2, Section 15.25.5 */
- *pte = (next_pt & 0x000ffffffffff000UL) |
- (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
+ *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(1) |
+ AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
+}
+
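+/*
+ * In the unified NPT/IOMMU table, an entry with a zero page mode (next
+ * level) field terminates the walk as a large page; a non-zero value points
+ * to a further table, in which case returning INVALID_PHYS_ADDR lets the
+ * generic walker descend.
+ */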
+static unsigned long npt_iommu_get_phys_l3(pt_entry_t pte, unsigned long virt)
+{
+ if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
+ return INVALID_PHYS_ADDR;
+ return (*pte & BIT_MASK(51, 30)) | (virt & BIT_MASK(29, 0));
+}
+
+static unsigned long npt_iommu_get_phys_l2(pt_entry_t pte, unsigned long virt)
+{
+ if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
+ return INVALID_PHYS_ADDR;
+ return (*pte & BIT_MASK(51, 21)) | (virt & BIT_MASK(20, 0));
}
int vcpu_vendor_init(void)
{
+ struct paging_structures parking_pt;
unsigned long vm_cr;
- int err, n;
+ int err;
err = svm_check_features();
if (err)
vm_cr = read_msr(MSR_VM_CR);
if (vm_cr & VM_CR_SVMDIS)
/* SVM disabled in BIOS */
- return -EPERM;
+ return trace_error(-EPERM);
- /* Nested paging is the same as the native one */
- memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
- for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
- npt_paging[n].set_next_pt = npt_set_next_pt;
+ /*
+ * Nested paging is almost the same as the native one. However, we
+ * need to override some handlers in order to reuse the page table for
+ * the IOMMU as well.
+ */
+ memcpy(npt_iommu_paging, x86_64_paging, sizeof(npt_iommu_paging));
+ npt_iommu_paging[0].set_next_pt = npt_iommu_set_next_pt_l4;
+ npt_iommu_paging[1].set_next_pt = npt_iommu_set_next_pt_l3;
+ npt_iommu_paging[2].set_next_pt = npt_iommu_set_next_pt_l2;
+ npt_iommu_paging[1].get_phys = npt_iommu_get_phys_l3;
+ npt_iommu_paging[2].get_phys = npt_iommu_get_phys_l2;
+
+ /* Map guest parking code (shared between cells and CPUs) */
+ parking_pt.root_paging = npt_iommu_paging;
+ parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
+ if (!parked_mode_npt)
+ return -ENOMEM;
+ err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
+ PAGE_SIZE, 0x000ff000,
+ PAGE_READONLY_FLAGS | PAGE_FLAG_US,
+ PAGING_NON_COHERENT);
+ if (err)
+ return err;
/* This is always false for AMD now (except in nested SVM);
see Sect. 16.3.1 in APMv2 */
(MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
} else {
- /* Enable Extended Interrupt LVT */
- apic_reserved_bits[0x50] = 0;
if (has_avic) {
avic_page = page_alloc(&remap_pool, 1);
if (!avic_page)
- return -ENOMEM;
+ return trace_error(-ENOMEM);
}
}
int vcpu_vendor_cell_init(struct cell *cell)
{
+ int err = -ENOMEM;
u64 flags;
- int err;
- /* allocate iopm (two 4-K pages + 3 bits) */
- cell->svm.iopm = page_alloc(&mem_pool, 3);
- if (!cell->svm.iopm)
- return -ENOMEM;
+ /* allocate iopm */
+ cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES);
+ if (!cell->arch.svm.iopm)
+ return err;
/* build root NPT of cell */
- cell->svm.npt_structs.root_paging = npt_paging;
- cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
- if (!cell->svm.npt_structs.root_table)
- return -ENOMEM;
+ cell->arch.svm.npt_iommu_structs.root_paging = npt_iommu_paging;
+ cell->arch.svm.npt_iommu_structs.root_table =
+ (page_table_t)cell->arch.root_table_page;
if (!has_avic) {
/*
* Map xAPIC as is; reads are passed, writes are trapped.
*/
- flags = PAGE_READONLY_FLAGS |
- PAGE_FLAG_US |
- PAGE_FLAG_UNCACHED;
- err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
- PAGE_SIZE, XAPIC_BASE,
- flags,
- PAGING_NON_COHERENT);
+ flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
+ err = paging_create(&cell->arch.svm.npt_iommu_structs,
+ XAPIC_BASE, PAGE_SIZE, XAPIC_BASE,
+ flags, PAGING_NON_COHERENT);
} else {
- flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
- err = paging_create(&cell->svm.npt_structs,
+ flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
+ err = paging_create(&cell->arch.svm.npt_iommu_structs,
paging_hvirt2phys(avic_page),
PAGE_SIZE, XAPIC_BASE,
- flags,
- PAGING_NON_COHERENT);
+ flags, PAGING_NON_COHERENT);
}
+ if (err)
+ goto err_free_iopm;
+
+ return 0;
+
+err_free_iopm:
+ page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
return err;
}
const struct jailhouse_memory *mem)
{
u64 phys_start = mem->phys_start;
- u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
+ u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
if (mem->flags & JAILHOUSE_MEM_READ)
flags |= PAGE_FLAG_PRESENT;
if (mem->flags & JAILHOUSE_MEM_WRITE)
flags |= PAGE_FLAG_RW;
- if (mem->flags & JAILHOUSE_MEM_EXECUTE)
- flags |= PAGE_FLAG_EXECUTE;
+ if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
+ flags |= PAGE_FLAG_NOEXECUTE;
if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
phys_start = paging_hvirt2phys(&cell->comm_page);
- return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
- mem->virt_start, flags, PAGING_NON_COHERENT);
+ flags |= amd_iommu_get_memory_region_flags(mem);
+
+ /*
+ * As we also manipulate the IOMMU page table, changes need to be
+ * coherent.
+ */
+ return paging_create(&cell->arch.svm.npt_iommu_structs, phys_start,
+ mem->size, mem->virt_start, flags,
+ PAGING_COHERENT);
}
int vcpu_unmap_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
- return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
- mem->size, PAGING_NON_COHERENT);
+ return paging_destroy(&cell->arch.svm.npt_iommu_structs,
+ mem->virt_start, mem->size, PAGING_COHERENT);
}
void vcpu_vendor_cell_exit(struct cell *cell)
{
- paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
- PAGING_NON_COHERENT);
- page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
+ paging_destroy(&cell->arch.svm.npt_iommu_structs, XAPIC_BASE,
+ PAGE_SIZE, PAGING_NON_COHERENT);
+ page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
}
int vcpu_init(struct per_cpu *cpu_data)
efer = read_msr(MSR_EFER);
if (efer & EFER_SVME)
- return -EBUSY;
+ return trace_error(-EBUSY);
efer |= EFER_SVME;
write_msr(MSR_EFER, efer);
cpu_data->svm_state = SVMON;
- if (!vmcb_setup(cpu_data))
+ vmcb_setup(cpu_data);
+
+ /*
+ * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
+ * set the values of reserved bits to the values found during the
+ * previous CR0 read."
+ * But we want to avoid surprises with new features unknown to us but
+ * set by Linux. So check if any assumed reserved bit was set and bail
+ * out if so.
+ * Note that the APM defines all reserved CR4 bits as must-be-zero.
+ */
+ if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
return -EIO;
- write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
+ /* bring CR0 and CR4 into well-defined states */
+ write_cr0(X86_CR0_HOST_STATE);
+ write_cr4(X86_CR4_HOST_STATE);
- /* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
- if (!using_x2apic)
- apic_reserved_bits[0x50] = 0;
+ write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
return 0;
}
write_msr(MSR_VM_HSAVE_PA, 0);
}
-void vcpu_activate_vmm(struct per_cpu *cpu_data)
+void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
{
unsigned long vmcb_pa, host_stack;
vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);
- /* Clear host-mode MSRs */
- write_msr(MSR_IA32_SYSENTER_CS, 0);
- write_msr(MSR_IA32_SYSENTER_EIP, 0);
- write_msr(MSR_IA32_SYSENTER_ESP, 0);
-
- write_msr(MSR_STAR, 0);
- write_msr(MSR_LSTAR, 0);
- write_msr(MSR_CSTAR, 0);
- write_msr(MSR_SFMASK, 0);
- write_msr(MSR_KERNGS_BASE, 0);
-
- /*
- * XXX: We don't set our own PAT here but rather rely on Linux PAT
- * settigs (and MTRRs). Potentially, a malicious Linux root cell can
- * set values different from what we expect, and interfere with APIC
- * virtualization in non-AVIC mode.
- */
-
/* We enter Linux at the point arch_entry would return to as well.
* rax is cleared to signal success to the caller. */
asm volatile(
"mov 0x18(%%rdi),%%r12\n\t"
"mov 0x20(%%rdi),%%rbx\n\t"
"mov 0x28(%%rdi),%%rbp\n\t"
- "mov %0, %%rax\n\t"
- "vmload\n\t"
- "vmrun\n\t"
- "vmsave\n\t"
- /* Restore hypervisor stack */
- "mov %2, %%rsp\n\t"
- "jmp svm_vmexit"
+ "mov %2,%%rsp\n\t"
+ "vmload %%rax\n\t"
+ "jmp svm_vmentry"
: /* no output */
- : "m" (vmcb_pa), "D" (cpu_data->linux_reg), "m" (host_stack)
- : "memory", "r15", "r14", "r13", "r12",
- "rbx", "rbp", "rax", "cc");
+ : "D" (cpu_data->linux_reg), "a" (vmcb_pa), "m" (host_stack));
__builtin_unreachable();
}
-void __attribute__((noreturn))
-vcpu_deactivate_vmm(struct registers *guest_regs)
+void __attribute__((noreturn)) vcpu_deactivate_vmm(void)
{
struct per_cpu *cpu_data = this_cpu_data();
struct vmcb *vmcb = &cpu_data->vmcb;
unsigned long *stack = (unsigned long *)vmcb->rsp;
unsigned long linux_ip = vmcb->rip;
- /* We are leaving - set the GIF */
- asm volatile ("stgi" : : : "memory");
-
- /*
- * Restore the MSRs.
- *
- * XXX: One could argue this is better to be done in
- * arch_cpu_restore(), however, it would require changes
- * to cpu_data to store STAR and friends.
- */
- write_msr(MSR_STAR, vmcb->star);
- write_msr(MSR_LSTAR, vmcb->lstar);
- write_msr(MSR_CSTAR, vmcb->cstar);
- write_msr(MSR_SFMASK, vmcb->sfmask);
- write_msr(MSR_KERNGS_BASE, vmcb->kerngsbase);
-
+ cpu_data->linux_cr0 = vmcb->cr0;
cpu_data->linux_cr3 = vmcb->cr3;
cpu_data->linux_gdtr.base = vmcb->gdtr.base;
cpu_data->linux_cs.selector = vmcb->cs.selector;
- cpu_data->linux_tss.selector = vmcb->tr.selector;
+ asm volatile("str %0" : "=m" (cpu_data->linux_tss.selector));
cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
- cpu_data->linux_fs.base = vmcb->fs.base;
+ cpu_data->linux_fs.base = read_msr(MSR_FS_BASE);
cpu_data->linux_gs.base = vmcb->gs.base;
- cpu_data->linux_sysenter_cs = vmcb->sysenter_cs;
- cpu_data->linux_sysenter_eip = vmcb->sysenter_eip;
- cpu_data->linux_sysenter_esp = vmcb->sysenter_esp;
-
cpu_data->linux_ds.selector = vmcb->ds.selector;
cpu_data->linux_es.selector = vmcb->es.selector;
- cpu_data->linux_fs.selector = vmcb->fs.selector;
- cpu_data->linux_gs.selector = vmcb->gs.selector;
- arch_cpu_restore(cpu_data);
+ asm volatile("mov %%fs,%0" : "=m" (cpu_data->linux_fs.selector));
+ asm volatile("mov %%gs,%0" : "=m" (cpu_data->linux_gs.selector));
+
+ arch_cpu_restore(cpu_data, 0);
stack--;
*stack = linux_ip;
"mov %%rax,%%rsp\n\t"
"xor %%rax,%%rax\n\t"
"ret"
- : : "a" (stack), "b" (guest_regs));
+ : : "a" (stack), "b" (&cpu_data->guest_regs));
__builtin_unreachable();
}
-static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
+void vcpu_vendor_reset(unsigned int sipi_vector)
{
+ static const struct svm_segment dataseg_reset_state = {
+ .selector = 0,
+ .base = 0,
+ .limit = 0xffff,
+ .access_rights = 0x0093,
+ };
+ static const struct svm_segment dtr_reset_state = {
+ .selector = 0,
+ .base = 0,
+ .limit = 0xffff,
+ .access_rights = 0,
+ };
+ struct per_cpu *cpu_data = this_cpu_data();
struct vmcb *vmcb = &cpu_data->vmcb;
unsigned long val;
- bool ok = true;
vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
vmcb->cr3 = 0;
vmcb->cs.limit = 0xffff;
vmcb->cs.access_rights = 0x009b;
- vmcb->ds.selector = 0;
- vmcb->ds.base = 0;
- vmcb->ds.limit = 0xffff;
- vmcb->ds.access_rights = 0x0093;
-
- vmcb->es.selector = 0;
- vmcb->es.base = 0;
- vmcb->es.limit = 0xffff;
- vmcb->es.access_rights = 0x0093;
-
- vmcb->fs.selector = 0;
- vmcb->fs.base = 0;
- vmcb->fs.limit = 0xffff;
- vmcb->fs.access_rights = 0x0093;
-
- vmcb->gs.selector = 0;
- vmcb->gs.base = 0;
- vmcb->gs.limit = 0xffff;
- vmcb->gs.access_rights = 0x0093;
-
- vmcb->ss.selector = 0;
- vmcb->ss.base = 0;
- vmcb->ss.limit = 0xffff;
- vmcb->ss.access_rights = 0x0093;
+ vmcb->ds = dataseg_reset_state;
+ vmcb->es = dataseg_reset_state;
+ vmcb->fs = dataseg_reset_state;
+ vmcb->gs = dataseg_reset_state;
+ vmcb->ss = dataseg_reset_state;
vmcb->tr.selector = 0;
vmcb->tr.base = 0;
vmcb->ldtr.limit = 0xffff;
vmcb->ldtr.access_rights = 0x0082;
- vmcb->gdtr.selector = 0;
- vmcb->gdtr.base = 0;
- vmcb->gdtr.limit = 0xffff;
- vmcb->gdtr.access_rights = 0;
-
- vmcb->idtr.selector = 0;
- vmcb->idtr.base = 0;
- vmcb->idtr.limit = 0xffff;
- vmcb->idtr.access_rights = 0;
+ vmcb->gdtr = dtr_reset_state;
+ vmcb->idtr = dtr_reset_state;
vmcb->efer = EFER_SVME;
vmcb->sysenter_esp = 0;
vmcb->kerngsbase = 0;
- vmcb->g_pat = 0x0007040600070406;
-
vmcb->dr7 = 0x00000400;
- ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);
+ vmcb->eventinj = 0;
- /* This is always false, but to be consistent with vmx.c... */
- if (!ok) {
- panic_printk("FATAL: CPU reset failed\n");
- panic_stop();
- }
+ /* Almost all of the guest state changed */
+ vmcb->clean_bits = 0;
+
+ svm_set_cell_config(cpu_data->cell, vmcb);
+
+ asm volatile(
+ "vmload %%rax"
+ : : "a" (paging_hvirt2phys(vmcb)) : "memory");
+ /* vmload overwrites GS_BASE - restore the host state */
+ write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
}
void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
- struct per_cpu *cpu_data = this_cpu_data();
- struct vmcb *vmcb = &cpu_data->vmcb;
- vmcb->rip += inst_len;
+ this_cpu_data()->vmcb.rip += inst_len;
}
-static void update_efer(struct per_cpu *cpu_data)
+static void update_efer(struct vmcb *vmcb)
{
- struct vmcb *vmcb = &cpu_data->vmcb;
unsigned long efer = vmcb->efer;
if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
vcpu_tlb_flush();
vmcb->efer = efer;
+ vmcb->clean_bits &= ~CLEAN_BITS_CRX;
}
bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
- struct per_cpu *cpu_data = this_cpu_data();
- struct vmcb *vmcb = &cpu_data->vmcb;
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
if (vmcb->efer & EFER_LMA) {
pg_structs->root_paging = x86_64_paging;
- pg_structs->root_table_gphys =
- vmcb->cr3 & 0x000ffffffffff000UL;
+ pg_structs->root_table_gphys = vmcb->cr3 & BIT_MASK(51, 12);
} else if ((vmcb->cr0 & X86_CR0_PG) &&
!(vmcb->cr4 & X86_CR4_PAE)) {
pg_structs->root_paging = i386_paging;
- pg_structs->root_table_gphys =
- vmcb->cr3 & 0xfffff000UL;
+ pg_structs->root_table_gphys = vmcb->cr3 & BIT_MASK(31, 12);
} else if (!(vmcb->cr0 & X86_CR0_PG)) {
/*
* Can be in non-paged protected mode as well, but
return true;
}
+void vcpu_vendor_set_guest_pat(unsigned long val)
+{
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
+
+ vmcb->g_pat = val;
+ vmcb->clean_bits &= ~CLEAN_BITS_NP;
+}
+
struct parse_context {
unsigned int remaining;
unsigned int size;
return true;
}
-static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data,
- unsigned long pc,
- unsigned char reg,
- unsigned long *gpr)
+static bool svm_parse_mov_to_cr(struct vmcb *vmcb, unsigned long pc,
+ unsigned char reg, unsigned long *gpr)
{
struct guest_paging_structures pg_structs;
- struct vmcb *vmcb = &cpu_data->vmcb;
struct parse_context ctx = {};
/* No prefixes are supported yet */
u8 opcodes[] = {0x0f, 0x22}, modrm;
- bool ok = false;
int n;
ctx.remaining = ARRAY_SIZE(opcodes);
if (!vcpu_get_guest_paging_structs(&pg_structs))
- goto out;
+ return false;
ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;
if (!ctx_advance(&ctx, &pc, &pg_structs))
- goto out;
+ return false;
- for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++) {
- if (*(ctx.inst) != opcodes[n])
- goto out;
- if (!ctx_advance(&ctx, &pc, &pg_structs))
- goto out;
- }
+ for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++)
+ if (*(ctx.inst) != opcodes[n] ||
+ !ctx_advance(&ctx, &pc, &pg_structs))
+ return false;
if (!ctx_advance(&ctx, &pc, &pg_structs))
- goto out;
+ return false;
modrm = *(ctx.inst);
if (((modrm & 0x38) >> 3) != reg)
- goto out;
+ return false;
if (gpr)
*gpr = (modrm & 0x7);
- ok = true;
-out:
- return ok;
+ return true;
}
/*
* result in no more than VMEXIT_INVALID. Maybe we can get along without it
* altogether?
*/
-static bool svm_handle_cr(struct registers *guest_regs,
- struct per_cpu *cpu_data)
+static bool svm_handle_cr(struct per_cpu *cpu_data)
{
struct vmcb *vmcb = &cpu_data->vmcb;
/* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
unsigned long reg = -1, val, bits;
- bool ok = true;
if (has_assists) {
if (!(vmcb->exitinfo1 & (1UL << 63))) {
panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
- ok = false;
- goto out;
+ return false;
}
reg = vmcb->exitinfo1 & 0x07;
} else {
- if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, ®)) {
+ if (!svm_parse_mov_to_cr(vmcb, vmcb->rip, 0, ®)) {
panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
- ok = false;
- goto out;
+ return false;
}
- };
+ }
if (reg == 4)
val = vmcb->rsp;
else
- val = ((unsigned long *)guest_regs)[15 - reg];
+ val = cpu_data->guest_regs.by_index[15 - reg];
vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
/* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
if ((val ^ vmcb->cr0) & bits)
vcpu_tlb_flush();
/* TODO: better check for #GP reasons */
- vmcb->cr0 = val & SVM_CR0_CLEARED_BITS;
+ vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
if (val & X86_CR0_PG)
- update_efer(cpu_data);
+ update_efer(vmcb);
+ vmcb->clean_bits &= ~CLEAN_BITS_CRX;
-out:
- return ok;
-}
-
-static bool svm_handle_msr_read(struct registers *guest_regs,
- struct per_cpu *cpu_data)
-{
- if (guest_regs->rcx >= MSR_X2APIC_BASE &&
- guest_regs->rcx <= MSR_X2APIC_END) {
- vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
- x2apic_handle_read(guest_regs);
- return true;
- } else {
- panic_printk("FATAL: Unhandled MSR read: %x\n",
- guest_regs->rcx);
- return false;
- }
+ return true;
}
-static bool svm_handle_msr_write(struct registers *guest_regs,
- struct per_cpu *cpu_data)
+static bool svm_handle_msr_write(struct per_cpu *cpu_data)
{
struct vmcb *vmcb = &cpu_data->vmcb;
unsigned long efer;
- bool result = true;
- if (guest_regs->rcx >= MSR_X2APIC_BASE &&
- guest_regs->rcx <= MSR_X2APIC_END) {
- result = x2apic_handle_write(guest_regs, cpu_data);
- goto out;
- }
- if (guest_regs->rcx == MSR_EFER) {
+ if (cpu_data->guest_regs.rcx == MSR_EFER) {
/* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
- efer = (guest_regs->rax & 0xffffffff) |
- (guest_regs->rdx << 32) | EFER_SVME;
+ efer = get_wrmsr_value(&cpu_data->guest_regs) | EFER_SVME;
/* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
vcpu_tlb_flush();
vmcb->efer = efer;
- goto out;
+ vmcb->clean_bits &= ~CLEAN_BITS_CRX;
+ vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
+ return true;
}
- result = false;
- panic_printk("FATAL: Unhandled MSR write: %x\n",
- guest_regs->rcx);
-out:
- if (result)
- vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
- return result;
+ return vcpu_handle_msr_write();
}
/*
* TODO: This handles unaccelerated (non-AVIC) access. AVIC should
* be treated separately in svm_handle_avic_access().
*/
-static bool svm_handle_apic_access(struct registers *guest_regs,
- struct per_cpu *cpu_data)
+static bool svm_handle_apic_access(struct vmcb *vmcb)
{
- struct vmcb *vmcb = &cpu_data->vmcb;
struct guest_paging_structures pg_structs;
unsigned int inst_len, offset;
bool is_write;
if (!vcpu_get_guest_paging_structs(&pg_structs))
goto out_err;
- inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
- &pg_structs, offset >> 4, is_write);
+ inst_len = apic_mmio_access(vmcb->rip, &pg_structs, offset >> 4,
+ is_write);
if (!inst_len)
goto out_err;
return false;
}
-static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
+static void dump_guest_regs(union registers *guest_regs, struct vmcb *vmcb)
{
panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
vmcb->rsp, vmcb->rflags);
panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
guest_regs->rsi, guest_regs->rdi);
panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
- vmcb->cs.selector,
- vmcb->cs.base,
- vmcb->cs.access_rights,
- (vmcb->efer & EFER_LMA));
+ vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
+ !!(vmcb->efer & EFER_LMA));
panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
vmcb->cr3, vmcb->cr4);
panic_printk("EFER: %p\n", vmcb->efer);
}
-static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
- struct vcpu_pf_intercept *out)
+void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *io)
{
- struct vmcb *vmcb = &cpu_data->vmcb;
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
+ u64 exitinfo = vmcb->exitinfo1;
- out->phys_addr = vmcb->exitinfo2;
- out->is_write = !!(vmcb->exitinfo1 & 0x2);
+ /* parse exit info for I/O instructions (see APM, 15.10.2) */
+ io->port = (exitinfo >> 16) & 0xFFFF;
+ io->size = (exitinfo >> 4) & 0x7;
+ io->in = !!(exitinfo & 0x1);
+ io->inst_len = vmcb->exitinfo2 - vmcb->rip;
+ io->rep_or_str = !!(exitinfo & 0x0c);
}
-static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
- struct vcpu_io_intercept *out)
+void vcpu_vendor_get_mmio_intercept(struct vcpu_mmio_intercept *mmio)
{
- struct vmcb *vmcb = &cpu_data->vmcb;
- u64 exitinfo = vmcb->exitinfo1;
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
- /* parse exit info for I/O instructions (see APM, 15.10.2 ) */
- out->port = (exitinfo >> 16) & 0xFFFF;
- out->size = (exitinfo >> 4) & 0x7;
- out->in = !!(exitinfo & 0x1);
- out->inst_len = vmcb->exitinfo2 - vmcb->rip;
- out->rep_or_str = !!(exitinfo & 0x0c);
+ mmio->phys_addr = vmcb->exitinfo2;
+ mmio->is_write = !!(vmcb->exitinfo1 & 0x2);
}
-void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
+void vcpu_handle_exit(struct per_cpu *cpu_data)
{
struct vmcb *vmcb = &cpu_data->vmcb;
- struct vcpu_execution_state x_state;
- struct vcpu_pf_intercept pf;
- struct vcpu_io_intercept io;
bool res = false;
- int sipi_vector;
+
+ vmcb->gs.base = read_msr(MSR_GS_BASE);
/* Restore GS value expected by per_cpu data accessors */
write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
+ /*
+ * All guest state is marked unmodified; individual handlers must clear
+ * the bits as needed. A set clean bit allows the CPU to reuse its cached
+ * copy of the corresponding VMCB state on the next VMRUN (see APMv2,
+ * Sect. 15.15).
+ */
+ vmcb->clean_bits = 0xffffffff;
switch (vmcb->exitcode) {
case VMEXIT_INVALID:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
/* Temporarily enable GIF to consume pending NMI */
asm volatile("stgi; clgi" : : : "memory");
- sipi_vector = x86_handle_events(cpu_data);
- if (sipi_vector >= 0) {
- printk("CPU %d received SIPI, vector %x\n",
- cpu_data->cpu_id, sipi_vector);
- vcpu_reset(cpu_data, sipi_vector);
- memset(guest_regs, 0, sizeof(*guest_regs));
- }
- iommu_check_pending_faults(cpu_data);
- return;
- case VMEXIT_CPUID:
- /* FIXME: We are not intercepting CPUID now */
- return;
+ x86_check_events();
+ goto vmentry;
case VMEXIT_VMMCALL:
- vcpu_vendor_get_execution_state(&x_state);
- vcpu_handle_hypercall(guest_regs, &x_state);
- return;
+ vcpu_handle_hypercall();
+ goto vmentry;
case VMEXIT_CR0_SEL_WRITE:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
- if (svm_handle_cr(guest_regs, cpu_data))
- return;
+ if (svm_handle_cr(cpu_data))
+ goto vmentry;
break;
+ case VMEXIT_CPUID:
+ vcpu_handle_cpuid();
+ goto vmentry;
case VMEXIT_MSR:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
if (!vmcb->exitinfo1)
- res = svm_handle_msr_read(guest_regs, cpu_data);
+ res = vcpu_handle_msr_read();
else
- res = svm_handle_msr_write(guest_regs, cpu_data);
+ res = svm_handle_msr_write(cpu_data);
if (res)
- return;
+ goto vmentry;
break;
case VMEXIT_NPF:
if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
    vmcb->exitinfo2 >= XAPIC_BASE &&
    vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
/* APIC access in non-AVIC mode */
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
- if (svm_handle_apic_access(guest_regs, cpu_data))
- return;
+ if (svm_handle_apic_access(vmcb))
+ goto vmentry;
} else {
/* General MMIO (IOAPIC, PCI etc) */
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
- vcpu_vendor_get_pf_intercept(cpu_data, &pf);
- if (vcpu_handle_pt_violation(guest_regs, &pf))
- return;
+ if (vcpu_handle_mmio_access())
+ goto vmentry;
}
-
- panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
- "error code is %x\n", vmcb->exitinfo2,
- vmcb->exitinfo1 & 0xf);
break;
case VMEXIT_XSETBV:
- cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
- if ((guest_regs->rax & X86_XCR0_FP) &&
- (guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
- guest_regs->rcx == 0 && guest_regs->rdx == 0) {
- vcpu_skip_emulated_instruction(X86_INST_LEN_XSETBV);
- asm volatile(
- "xsetbv"
- : /* no output */
- : "a" (guest_regs->rax), "c" (0), "d" (0));
- return;
- }
- panic_printk("FATAL: Invalid xsetbv parameters: "
- "xcr[%d] = %x:%x\n", guest_regs->rcx,
- guest_regs->rdx, guest_regs->rax);
+ if (vcpu_handle_xsetbv())
+ goto vmentry;
break;
case VMEXIT_IOIO:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
- vcpu_vendor_get_io_intercept(cpu_data, &io);
- if (vcpu_handle_io_access(guest_regs, &io))
- return;
+ if (vcpu_handle_io_access())
+ goto vmentry;
break;
+ case VMEXIT_EXCEPTION_DB:
+ case VMEXIT_EXCEPTION_AC:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_EXCEPTION]++;
+ /* Reinject exception, including error code if needed. */
+ vmcb->eventinj = (vmcb->exitcode - VMEXIT_EXCEPTION_DE) |
+ SVM_EVENTINJ_EXCEPTION | SVM_EVENTINJ_VALID;
+ if (vmcb->exitcode == VMEXIT_EXCEPTION_AC) {
+ vmcb->eventinj |= SVM_EVENTINJ_ERR_VALID;
+ vmcb->eventinj_err = vmcb->exitinfo1;
+ }
+ x86_check_events();
+ goto vmentry;
/* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
default:
panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
"exitinfo1 %p exitinfo2 %p\n",
vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
}
- dump_guest_regs(guest_regs, vmcb);
+ dump_guest_regs(&cpu_data->guest_regs, vmcb);
panic_park();
+
+vmentry:
+ write_msr(MSR_GS_BASE, vmcb->gs.base);
}
-void vcpu_park(struct per_cpu *cpu_data)
+void vcpu_park(void)
{
- /* TODO: Implement */
+ vcpu_vendor_reset(APIC_BSP_PSEUDO_SIPI);
+ /* No need to clear VMCB Clean bit: vcpu_vendor_reset() already does
+ * this. */
+ this_cpu_data()->vmcb.n_cr3 = paging_hvirt2phys(parked_mode_npt);
+
+ vcpu_tlb_flush();
}
-void vcpu_nmi_handler(struct per_cpu *cpu_data)
+void vcpu_nmi_handler(void)
{
- printk("Consuming pending NMI on CPU %d\n", cpu_data->cpu_id);
}
void vcpu_tlb_flush(void)
{
- struct per_cpu *cpu_data = this_cpu_data();
- struct vmcb *vmcb = &cpu_data->vmcb;
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
if (has_flush_by_asid)
vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
unsigned long pc, unsigned int *size)
{
- struct per_cpu *cpu_data = this_cpu_data();
- struct vmcb *vmcb = &cpu_data->vmcb;
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
unsigned long start;
if (has_assists) {
void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
struct vcpu_io_bitmap *iobm)
{
- iobm->data = cell->svm.iopm;
- iobm->size = sizeof(cell->svm.iopm);
+ iobm->data = cell->arch.svm.iopm;
+ iobm->size = IOPM_PAGES * PAGE_SIZE;
}
void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
- struct per_cpu *cpu_data = this_cpu_data();
+ struct vmcb *vmcb = &this_cpu_data()->vmcb;
+
+ x_state->efer = vmcb->efer;
+ x_state->rflags = vmcb->rflags;
+ x_state->cs = vmcb->cs.selector;
+ x_state->rip = vmcb->rip;
+}
- x_state->efer = cpu_data->vmcb.efer;
- x_state->rflags = cpu_data->vmcb.rflags;
- x_state->cs = cpu_data->vmcb.cs.selector;
- x_state->rip = cpu_data->vmcb.rip;
+/* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
+void enable_irq(void)
+{
+ asm volatile("stgi; sti" : : : "memory");
+}
+
+/* Jailhouse runs with GIF cleared, so we need to restore this state */
+void disable_irq(void)
+{
+ asm volatile("cli; clgi" : : : "memory");
}