return true;
}
+/* Handle an intercepted RDMSR. Only the x2APIC MSR range is emulated;
+ * any other MSR read is fatal for the cell. Returns true on success. */
+static bool svm_handle_msr_read(struct registers *guest_regs,
+				struct per_cpu *cpu_data)
+{
+	/* Guard: anything outside the x2APIC window is unhandled. */
+	if (guest_regs->rcx < MSR_X2APIC_BASE ||
+	    guest_regs->rcx > MSR_X2APIC_END) {
+		panic_printk("FATAL: Unhandled MSR read: %x\n",
+			     guest_regs->rcx);
+		return false;
+	}
+
+	/* Advance RIP past the RDMSR before emulating the access. */
+	vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
+	x2apic_handle_read(guest_regs);
+	return true;
+}
+
+/* Handle an intercepted WRMSR. Emulates x2APIC MSR writes and guards
+ * EFER updates; any other MSR write is fatal for the cell. On success,
+ * the guest's RIP is advanced past the WRMSR instruction. */
+static bool svm_handle_msr_write(struct registers *guest_regs,
+				 struct per_cpu *cpu_data)
+{
+	struct vmcb *vmcb = &cpu_data->vmcb;
+	unsigned long efer;
+	bool result;
+
+	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
+	    guest_regs->rcx <= MSR_X2APIC_END) {
+		result = x2apic_handle_write(guest_regs, cpu_data);
+	} else if (guest_regs->rcx == MSR_EFER) {
+		/* Never let a guest to disable SVME; see APMv2, Sect. 3.1.7 */
+		efer = (guest_regs->rax & 0xffffffff) |
+			(guest_regs->rdx << 32) | EFER_SVME;
+		/* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
+		if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
+			vcpu_tlb_flush();
+		vmcb->efer = efer;
+		result = true;
+	} else {
+		panic_printk("FATAL: Unhandled MSR write: %x\n",
+			     guest_regs->rcx);
+		result = false;
+	}
+
+	/* Only step over the instruction if the write was emulated. */
+	if (result)
+		vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
+	return result;
+}
+
static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
{
panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
{
struct vmcb *vmcb = &cpu_data->vmcb;
struct vcpu_execution_state x_state;
+ bool res = false;
/* Restore GS value expected by per_cpu data accessors */
write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
vcpu_vendor_get_execution_state(&x_state);
vcpu_handle_hypercall(guest_regs, &x_state);
return;
+ case VMEXIT_MSR:
+ cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
+ if (!vmcb->exitinfo1)
+ res = svm_handle_msr_read(guest_regs, cpu_data);
+ else
+ res = svm_handle_msr_write(guest_regs, cpu_data);
+ if (res)
+ return;
+ break;
default:
panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
"exitinfo1 %p exitinfo2 %p\n",