u32 size;
};
+/*
+ * Vendor-neutral snapshot of the guest's execution state at VM exit,
+ * filled in by vcpu_vendor_get_execution_state() and consumed by the
+ * generic hypercall path.
+ */
+struct vcpu_execution_state {
+ u64 efer; /* guest IA32_EFER; EFER_LMA bit indicates long mode */
+ u64 rflags; /* guest RFLAGS; used to detect virtual-8086 mode */
+ u16 cs; /* guest CS selector; low 2 bits give the privilege level */
+ u64 rip; /* guest instruction pointer at the exit */
+};
+
int vcpu_vendor_init(void);
int vcpu_cell_init(struct cell *cell);
void vcpu_exit(struct per_cpu *cpu_data);
void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data);
+/* Tear down VMM mode on this CPU and resume the guest natively; called on
+ * a successful JAILHOUSE_HC_DISABLE hypercall and never returns. */
+void __attribute__((noreturn))
+vcpu_deactivate_vmm(struct registers *guest_regs);
+
void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data);
void vcpu_park(struct per_cpu *cpu_data);
void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
struct vcpu_io_bitmap *out);
+/* Vendor back-end hook: fill x_state with the current guest EFER, RFLAGS,
+ * CS selector and RIP (e.g. read from the VMCS/VMCB). */
+void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state);
+
+/* Generic hypercall dispatcher shared by the vendor back-ends; result is
+ * returned to the guest in guest_regs->rax. */
+void vcpu_handle_hypercall(struct registers *guest_regs,
+ struct vcpu_execution_state *x_state);
+
bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs);
#endif
#endif
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
+#include <jailhouse/printk.h>
#include <jailhouse/string.h>
#include <jailhouse/types.h>
#include <asm/i8042.h>
vcpu_vendor_cell_exit(cell);
}
+
+/*
+ * Generic hypercall handler, factored out of the vendor-specific exit
+ * handlers. Validates the caller's privilege, dispatches the hypercall and
+ * writes the result back into guest RAX. On a successful DISABLE hypercall
+ * it does not return.
+ */
+void vcpu_handle_hypercall(struct registers *guest_regs,
+ struct vcpu_execution_state *x_state)
+{
+ bool long_mode = !!(x_state->efer & EFER_LMA);
+ /* outside long mode, guest registers carry only 32 significant bits */
+ unsigned long arg_mask = long_mode ? (u64)-1 : (u32)-1;
+ struct per_cpu *cpu_data = this_cpu_data();
+ unsigned long code = guest_regs->rax;
+
+ /* advance guest RIP past the vmcall before any early return */
+ vcpu_skip_emulated_instruction(X86_INST_LEN_VMCALL);
+
+ /* reject hypercalls from virtual-8086 mode or from non-ring-0 callers */
+ if ((!long_mode && (x_state->rflags & X86_RFLAGS_VM)) ||
+ (x_state->cs & 3) != 0) {
+ guest_regs->rax = -EPERM;
+ return;
+ }
+
+ guest_regs->rax = hypercall(code, guest_regs->rdi & arg_mask,
+ guest_regs->rsi & arg_mask);
+ if (guest_regs->rax == -ENOSYS)
+ /* RIP was already advanced above, so back it up for the report */
+ printk("CPU %d: Unknown vmcall %d, RIP: %p\n",
+ cpu_data->cpu_id, code,
+ x_state->rip - X86_INST_LEN_VMCALL);
+
+ /* successful disable: leave VMM mode on this CPU, does not return */
+ if (code == JAILHOUSE_HC_DISABLE && guest_regs->rax == 0)
+ vcpu_deactivate_vmm(guest_regs);
+}
panic_stop();
}
-static void __attribute__((noreturn))
-vmx_cpu_deactivate_vmm(struct registers *guest_regs)
+void __attribute__((noreturn))
+vcpu_deactivate_vmm(struct registers *guest_regs)
{
unsigned long *stack = (unsigned long *)vmcs_read64(GUEST_RSP);
unsigned long linux_ip = vmcs_read64(GUEST_RIP);
vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE);
}
-static void vcpu_handle_hypercall(struct registers *guest_regs)
-{
- bool ia32e_mode = !!(vmcs_read64(GUEST_IA32_EFER) & EFER_LMA);
- unsigned long arg_mask = ia32e_mode ? (u64)-1 : (u32)-1;
- unsigned long code = guest_regs->rax;
-
- vcpu_skip_emulated_instruction(X86_INST_LEN_VMCALL);
-
- if ((!ia32e_mode && vmcs_read64(GUEST_RFLAGS) & X86_RFLAGS_VM) ||
- (vmcs_read16(GUEST_CS_SELECTOR) & 3) != 0) {
- guest_regs->rax = -EPERM;
- return;
- }
-
- guest_regs->rax = hypercall(code, guest_regs->rdi & arg_mask,
- guest_regs->rsi & arg_mask);
- if (guest_regs->rax == -ENOSYS)
- printk("CPU %d: Unknown vmcall %d, RIP: %p\n", this_cpu_id(),
- code, vmcs_read64(GUEST_RIP) - X86_INST_LEN_VMCALL);
-
- if (code == JAILHOUSE_HC_DISABLE && guest_regs->rax == 0)
- vmx_cpu_deactivate_vmm(guest_regs);
-}
-
static bool vmx_handle_cr(struct registers *guest_regs,
struct per_cpu *cpu_data)
{
void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
u32 reason = vmcs_read32(VM_EXIT_REASON);
+ struct vcpu_execution_state x_state;
int sipi_vector;
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
(u32 *)&guest_regs->rcx, (u32 *)&guest_regs->rdx);
return;
case EXIT_REASON_VMCALL:
- vcpu_handle_hypercall(guest_regs);
+ vcpu_vendor_get_execution_state(&x_state);
+ vcpu_handle_hypercall(guest_regs, &x_state);
return;
case EXIT_REASON_CR_ACCESS:
cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
iobm->data = cell->vmx.io_bitmap;
iobm->size = sizeof(cell->vmx.io_bitmap);
}
+
+/*
+ * VMX implementation of vcpu_vendor_get_execution_state(): snapshot the
+ * guest EFER, RFLAGS, CS selector and RIP from the current VMCS so the
+ * generic hypercall code need not touch VMCS fields directly.
+ */
+void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
+{
+ x_state->efer = vmcs_read64(GUEST_IA32_EFER);
+ x_state->rflags = vmcs_read64(GUEST_RFLAGS);
+ x_state->cs = vmcs_read16(GUEST_CS_SELECTOR);
+ x_state->rip = vmcs_read64(GUEST_RIP);
+}