/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/apic.h>
#include <asm/cell.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * The NW bit is ignored by all modern processors; however, some
 * combinations of NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_CLEARED_BITS	~X86_CR0_NW
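
/*
 * Example: a guest MOV to CR0 that tries to set NW=1 with CD=0 (an
 * illegal combination that would yield no more than VMEXIT_INVALID)
 * is sanitized by svm_handle_cr() below to NW=0, CD=0 via this mask.
 */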
static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];

static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
	[ SVM_MSRPM_0000 ] = {
		[      0/4 ...  0x017/4 ] = 0,
		[  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
		[  0x01c/4 ...  0x7ff/4 ] = 0,
		/* x2APIC MSRs - emulated if not present */
		[  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
		[  0x804/4 ...  0x807/4 ] = 0,
		[  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
		[  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
		[  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
		[  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
		[  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
		[  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
		[  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
		[  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
		[  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
		[  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
		[  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
		[  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
		[  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
		[  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
		[  0x840/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_C000 ] = {
		[      0/4 ...  0x07f/4 ] = 0,
		[  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
		[  0x084/4 ... 0x1fff/4 ] = 0
	},
	[ SVM_MSRPM_C001 ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_RESV ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	}
};
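
/*
 * Each MSR is assigned two consecutive bits in the permission bitmap
 * (lower bit: intercept RDMSR, higher bit: intercept WRMSR), i.e. four
 * MSRs per byte; see APMv2, Sect. 15.11. For example, the 0x93 byte
 * above (0b10010011) covers MSRs 0x808-0x80b: bits 0/1 intercept 0x808
 * reads and writes, bit 4 intercepts 0x80a reads, and bit 7 intercepts
 * 0x80b writes.
 */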

static void *avic_page;

static int svm_check_features(void)
{
	/* SVM is available */
	if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
		return -ENODEV;

	/* Nested paging */
	if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
		return -EIO;

	/* Decode assists */
	if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
		has_assists = true;

	/* AVIC support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
		has_avic = true;

	/* TLB Flush by ASID support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
		has_flush_by_asid = true;

	return 0;
}
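
/*
 * All optional features above are reported in CPUID leaf 0x8000000A
 * (EDX). Nested paging is the only hard requirement here; the others
 * merely enable fast paths (decode assists, AVIC, flush-by-ASID).
 */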

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
				     const struct desc_table_reg *dtr)
{
	struct svm_segment tmp = { 0 };

	if (dtr) {
		tmp.base = dtr->base;
		tmp.limit = dtr->limit & 0xffff;
	}

	*svm_segment = tmp;
}

/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
					 const struct segment *segment)
{
	u32 ar;

	svm_segment->selector = segment->selector;

	if (segment->access_rights == 0x10000) {
		/* VMX reports unusable segments with this magic value */
		svm_segment->access_rights = 0;
	} else {
		ar = segment->access_rights;
		svm_segment->access_rights =
			((ar & 0xf000) >> 4) | (ar & 0x00ff);
	}

	svm_segment->limit = segment->limit;
	svm_segment->base = segment->base;
}
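
/*
 * The conversion above packs the VMX-style access rights (attribute
 * bits 0-7 plus 12-15, with a gap in between) into SVM's contiguous
 * 12-bit attribute format: e.g. a 64-bit code segment with VMX access
 * rights 0xa09b becomes 0xa9b in the VMCB.
 */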

static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
	/* No real need for this function; used for consistency with vmx.c */
	vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
	vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

	return true;
}

static int vmcb_setup(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	memset(vmcb, 0, sizeof(struct vmcb));

	vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
	vmcb->cr3 = cpu_data->linux_cr3;
	vmcb->cr4 = read_cr4();

	set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
	set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
	set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
	set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
	set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
	set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
	set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

	set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
	set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
	set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

	vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

	vmcb->rflags = 0x02;
	/* Skip the registers pushed by the entry code on the Linux stack */
	vmcb->rsp = cpu_data->linux_sp +
		(NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
	vmcb->rip = cpu_data->linux_ip;

	vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
	vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
	vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
	vmcb->star = read_msr(MSR_STAR);
	vmcb->lstar = read_msr(MSR_LSTAR);
	vmcb->cstar = read_msr(MSR_CSTAR);
	vmcb->sfmask = read_msr(MSR_SFMASK);
	vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

	vmcb->dr6 = 0x00000ff0;
	vmcb->dr7 = 0x00000400;

	/* Make the hypervisor visible */
	vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

	/* Linux uses custom PAT setting */
	vmcb->g_pat = read_msr(MSR_IA32_PAT);

	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
	/* TODO: Do we need this for SVM ? */
	/* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

	vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

	/* No more than one guest owns the CPU */
	vmcb->guest_asid = 1;

	/* TODO: Setup AVIC */

	return vcpu_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
				     unsigned long gphys,
				     unsigned long flags)
{
	return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
				gphys, flags);
}

static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
	/* See APMv2, Section 15.25.5 */
	*pte = (next_pt & 0x000ffffffffff000UL) |
		(PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}
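
/*
 * Nested page table entries use the regular long-mode format, but the
 * CPU treats accesses through them as user-mode accesses, so the US
 * bit must be set in every entry (hence PAGE_FLAG_US here and in
 * vcpu_map_memory_region() below; see APMv2, Section 15.25.5).
 */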

int vcpu_vendor_init(void)
{
	unsigned long vm_cr;
	int err, n;

	err = svm_check_features();
	if (err)
		return err;

	vm_cr = read_msr(MSR_VM_CR);
	if (vm_cr & VM_CR_SVMDIS)
		/* SVM disabled in BIOS */
		return -EPERM;

	/* Nested paging is the same as the native one */
	memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
	for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
		npt_paging[n].set_next_pt = npt_set_next_pt;

	/* This is always false for AMD now (except in nested SVM);
	   see Sect. 16.3.1 in APMv2 */
	if (using_x2apic) {
		/* allow direct x2APIC access except for ICR writes */
		memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
		       (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
		msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
	} else {
		/* Enable Extended Interrupt LVT */
		apic_reserved_bits[0x50] = 0;
		if (has_avic) {
			avic_page = page_alloc(&remap_pool, 1);
			if (!avic_page)
				return -ENOMEM;
		}
	}

	return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
	u64 flags;
	int err;

	/* allocate iopm (two 4-K pages + 3 bits) */
	cell->svm.iopm = page_alloc(&mem_pool, 3);
	if (!cell->svm.iopm)
		return -ENOMEM;

	/* build root NPT of cell */
	cell->svm.npt_structs.root_paging = npt_paging;
	cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->svm.npt_structs.root_table)
		return -ENOMEM;

	if (!has_avic) {
		/*
		 * Map xAPIC as is; reads are passed, writes are trapped.
		 */
		flags = PAGE_READONLY_FLAGS |
			PAGE_FLAG_US |
			PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	} else {
		flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs,
				    paging_hvirt2phys(avic_page),
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	}

	return err;
}

int vcpu_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	u64 phys_start = mem->phys_start;
	u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= PAGE_FLAG_PRESENT;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= PAGE_FLAG_RW;
	if (mem->flags & JAILHOUSE_MEM_EXECUTE)
		flags |= PAGE_FLAG_EXECUTE;
	if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
		phys_start = paging_hvirt2phys(&cell->comm_page);

	return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
			     mem->virt_start, flags, PAGING_NON_COHERENT);
}
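
/*
 * Example: a cell region with JAILHOUSE_MEM_READ | JAILHOUSE_MEM_WRITE
 * is entered into the NPT as PAGE_FLAG_US | PAGE_FLAG_PRESENT |
 * PAGE_FLAG_RW; execute permission is only granted to regions that
 * also carry JAILHOUSE_MEM_EXECUTE.
 */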

int vcpu_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
			      mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
	paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
		       PAGING_NON_COHERENT);
	page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

int vcpu_init(struct per_cpu *cpu_data)
{
	unsigned long efer;
	int err;

	err = svm_check_features();
	if (err)
		return err;

	efer = read_msr(MSR_EFER);
	if (efer & EFER_SVME)
		return -EBUSY;

	efer |= EFER_SVME;
	write_msr(MSR_EFER, efer);

	cpu_data->svm_state = SVMON;

	if (!vmcb_setup(cpu_data))
		return -EIO;

	write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

	/* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
	if (!using_x2apic)
		apic_reserved_bits[0x50] = 0;

	return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
	unsigned long efer;

	if (cpu_data->svm_state == SVMOFF)
		return;

	cpu_data->svm_state = SVMOFF;

	efer = read_msr(MSR_EFER);
	efer &= ~EFER_SVME;
	write_msr(MSR_EFER, efer);

	write_msr(MSR_VM_HSAVE_PA, 0);
}

void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long val;
	bool ok = true;

	vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
	vmcb->cr3 = 0;
	vmcb->cr4 = 0;
	vmcb->rflags = 0x02;

	val = 0;
	if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
		val = 0xfff0;
		sipi_vector = 0xf0;
	}
	vmcb->rip = val;
	vmcb->rsp = 0;

	vmcb->cs.selector = sipi_vector << 8;
	vmcb->cs.base = sipi_vector << 12;
	vmcb->cs.limit = 0xffff;
	vmcb->cs.access_rights = 0x009b;

	vmcb->ds.selector = 0;
	vmcb->ds.base = 0;
	vmcb->ds.limit = 0xffff;
	vmcb->ds.access_rights = 0x0093;

	vmcb->es.selector = 0;
	vmcb->es.base = 0;
	vmcb->es.limit = 0xffff;
	vmcb->es.access_rights = 0x0093;

	vmcb->fs.selector = 0;
	vmcb->fs.base = 0;
	vmcb->fs.limit = 0xffff;
	vmcb->fs.access_rights = 0x0093;

	vmcb->gs.selector = 0;
	vmcb->gs.base = 0;
	vmcb->gs.limit = 0xffff;
	vmcb->gs.access_rights = 0x0093;

	vmcb->ss.selector = 0;
	vmcb->ss.base = 0;
	vmcb->ss.limit = 0xffff;
	vmcb->ss.access_rights = 0x0093;

	vmcb->tr.selector = 0;
	vmcb->tr.base = 0;
	vmcb->tr.limit = 0xffff;
	vmcb->tr.access_rights = 0x008b;

	vmcb->ldtr.selector = 0;
	vmcb->ldtr.base = 0;
	vmcb->ldtr.limit = 0xffff;
	vmcb->ldtr.access_rights = 0x0082;

	vmcb->gdtr.selector = 0;
	vmcb->gdtr.base = 0;
	vmcb->gdtr.limit = 0xffff;
	vmcb->gdtr.access_rights = 0;

	vmcb->idtr.selector = 0;
	vmcb->idtr.base = 0;
	vmcb->idtr.limit = 0xffff;
	vmcb->idtr.access_rights = 0;

	vmcb->efer = EFER_SVME;

	/* These MSRs are undefined on reset */
	vmcb->star = 0;
	vmcb->lstar = 0;
	vmcb->cstar = 0;
	vmcb->sfmask = 0;
	vmcb->sysenter_cs = 0;
	vmcb->sysenter_eip = 0;
	vmcb->sysenter_esp = 0;
	vmcb->kerngsbase = 0;

	vmcb->g_pat = 0x0007040600070406;
	vmcb->dr7 = 0x00000400;

	ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);

	/* This is always false, but to be consistent with vmx.c... */
	if (!ok) {
		panic_printk("FATAL: CPU reset failed\n");
		panic_stop(cpu_data);
	}
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	vmcb->rip += inst_len;
}

static void update_efer(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer = vmcb->efer;

	if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
		return;

	efer |= EFER_LMA;

	/* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
	if ((vmcb->efer ^ efer) & EFER_LMA)
		vcpu_tlb_flush();

	vmcb->efer = efer;
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (vmcb->efer & EFER_LMA) {
		pg_structs->root_paging = x86_64_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0x000ffffffffff000UL;
	} else if ((vmcb->cr0 & X86_CR0_PG) &&
		   !(vmcb->cr4 & X86_CR4_PAE)) {
		pg_structs->root_paging = i386_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0xfffff000UL;
	} else if (!(vmcb->cr0 & X86_CR0_PG)) {
		/*
		 * Can be in non-paged protected mode as well, but
		 * the translation mechanism will stay the same anyway.
		 */
		pg_structs->root_paging = realmode_paging;
		/*
		 * This will make paging_get_guest_pages map the page
		 * that also contains the bootstrap code and, thus, is
		 * always present in a cell.
		 */
		pg_structs->root_table_gphys = 0xff000;
	} else {
		printk("FATAL: Unsupported paging mode\n");
		return false;
	}

	return true;
}
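
/*
 * In short: EFER.LMA selects 4-level long-mode paging, CR0.PG with
 * CR4.PAE clear selects classic 32-bit paging, CR0.PG clear means
 * identity (real-mode style) translation, and the remaining case,
 * PAE paging outside long mode, is not supported here.
 */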

struct parse_context {
	unsigned int remaining;
	unsigned int size;
	unsigned long cs_base;
	const u8 *inst;
};

static bool ctx_advance(struct parse_context *ctx,
			unsigned long *pc,
			struct guest_paging_structures *pg_structs)
{
	if (!ctx->size) {
		ctx->size = ctx->remaining;
		ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
					  &ctx->size);
		if (!ctx->inst)
			return false;
		ctx->remaining -= ctx->size;
		*pc += ctx->size;
	}

	return true;
}

static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data, unsigned long pc,
				unsigned char reg, unsigned long *gpr)
{
	struct guest_paging_structures pg_structs;
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct parse_context ctx = {};
	/* No prefixes are supported yet */
	u8 opcodes[] = {0x0f, 0x22}, modrm;
	unsigned int n;

	ctx.remaining = ARRAY_SIZE(opcodes);
	if (!vcpu_get_guest_paging_structs(&pg_structs))
		return false;
	ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;

	if (!ctx_advance(&ctx, &pc, &pg_structs))
		return false;

	for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++) {
		if (*(ctx.inst) != opcodes[n])
			return false;
		if (!ctx_advance(&ctx, &pc, &pg_structs))
			return false;
	}

	modrm = *(ctx.inst);

	if (((modrm & 0x38) >> 3) != reg)
		return false;

	*gpr = (modrm & 0x7);

	return true;
}
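
/*
 * Example: "mov %rax,%cr0" encodes as 0f 22 c0; with modrm = 0xc0, the
 * reg field (bits 3-5) is 0 (CR0) and the r/m field (bits 0-2) selects
 * GPR 0 (RAX), which is the value that ends up in *gpr.
 */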

/*
 * XXX: The only visible reason to have this function (vmx.c consistency
 * aside) is to prevent cells from setting invalid CD+NW combinations that
 * result in no more than VMEXIT_INVALID. Maybe we can get along without it.
 */
static bool svm_handle_cr(struct registers *guest_regs,
			  struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	/* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
	unsigned long reg = -1, val, bits;

	if (has_assists) {
		if (!(vmcb->exitinfo1 & (1UL << 63))) {
			panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
			return false;
		}
		reg = vmcb->exitinfo1 & 0x07;
	} else {
		if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, &reg)) {
			panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
			return false;
		}
	}

	/* struct registers lays out the GPRs from r15 down to rax */
	val = ((unsigned long *)guest_regs)[15 - reg];

	vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
	/* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
	bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
	if ((val ^ vmcb->cr0) & bits)
		vcpu_tlb_flush();
	/* TODO: better check for #GP reasons */
	vmcb->cr0 = val & SVM_CR0_CLEARED_BITS;
	if (val & X86_CR0_PG)
		update_efer(cpu_data);

	return true;
}

static bool svm_handle_msr_read(struct registers *guest_regs,
				struct per_cpu *cpu_data)
{
	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
		x2apic_handle_read(guest_regs);
		return true;
	}

	panic_printk("FATAL: Unhandled MSR read: %x\n",
		     guest_regs->rcx);
	return false;
}

static bool svm_handle_msr_write(struct registers *guest_regs,
				 struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer;
	bool result = true;

	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		result = x2apic_handle_write(guest_regs, cpu_data);
		goto out;
	}
	if (guest_regs->rcx == MSR_EFER) {
		/* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
		efer = (guest_regs->rax & 0xffffffff) |
			(guest_regs->rdx << 32) | EFER_SVME;
		/* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
		if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
			vcpu_tlb_flush();
		vmcb->efer = efer;
		goto out;
	}

	result = false;
	panic_printk("FATAL: Unhandled MSR write: %x\n",
		     guest_regs->rcx);
out:
	vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
	return result;
}
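
/*
 * Example: a guest WRMSR to EFER with EDX:EAX = 0 is stored as
 * EFER = EFER_SVME, i.e. the hypervisor-enable bit is silently kept
 * set no matter what value the cell tried to write.
 */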

/*
 * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
 * be treated separately in svm_handle_avic_access().
 */
static bool svm_handle_apic_access(struct registers *guest_regs,
				   struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct guest_paging_structures pg_structs;
	unsigned int inst_len, offset;
	bool is_write;

	/* The caller is responsible for sanity checks */
	is_write = !!(vmcb->exitinfo1 & 0x2);
	offset = vmcb->exitinfo2 - XAPIC_BASE;

	if (offset & 0x00f)
		goto out_err;

	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto out_err;

	inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
				    &pg_structs, offset >> 4, is_write);
	if (!inst_len)
		goto out_err;

	vcpu_skip_emulated_instruction(inst_len);
	return true;

out_err:
	panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
		     offset, is_write);
	return false;
}

static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
{
	panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
		     vmcb->rsp, vmcb->rflags);
	panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
		     guest_regs->rbx, guest_regs->rcx);
	panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
		     guest_regs->rsi, guest_regs->rdi);
	panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
		     vmcb->cs.selector, vmcb->cs.base,
		     vmcb->cs.access_rights,
		     !!(vmcb->efer & EFER_LMA));
	panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
		     vmcb->cr3, vmcb->cr4);
	panic_printk("EFER: %p\n", vmcb->efer);
}

static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
					 struct vcpu_pf_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	out->phys_addr = vmcb->exitinfo2;
	out->is_write = !!(vmcb->exitinfo1 & 0x2);
}

static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
					 struct vcpu_io_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	u64 exitinfo = vmcb->exitinfo1;

	/* parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
	out->port = (exitinfo >> 16) & 0xFFFF;
	out->size = (exitinfo >> 4) & 0x7;
	out->in = !!(exitinfo & 0x1);
	out->inst_len = vmcb->exitinfo2 - vmcb->rip;
	out->rep_or_str = !!(exitinfo & 0x0c);
}
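
/*
 * Example: a guest "in %al,(%dx)" on port 0x70 yields
 * exitinfo1 = 0x00700011 (bit 0: IN, bit 4: 8-bit operand,
 * bits 16-31: port), which decodes to port = 0x70, size = 1, in = 1,
 * rep_or_str = 0; exitinfo2 holds the RIP of the next instruction.
 */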

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct vcpu_execution_state x_state;
	struct vcpu_pf_intercept pf;
	struct vcpu_io_intercept io;
	bool res = false;
	int sipi_vector;

	/* Restore GS value expected by per_cpu data accessors */
	write_msr(MSR_GS_BASE, (unsigned long)cpu_data);

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

	switch (vmcb->exitcode) {
	case VMEXIT_INVALID:
		panic_printk("FATAL: VM-Entry failure, error %d\n",
			     vmcb->exitcode);
		break;
	case VMEXIT_NMI:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
		/* Temporarily enable GIF to consume pending NMI */
		asm volatile("stgi; clgi" : : : "memory");
		sipi_vector = x86_handle_events(cpu_data);
		if (sipi_vector >= 0) {
			printk("CPU %d received SIPI, vector %x\n",
			       cpu_data->cpu_id, sipi_vector);
			vcpu_reset(cpu_data, sipi_vector);
			memset(guest_regs, 0, sizeof(*guest_regs));
		}
		iommu_check_pending_faults(cpu_data);
		return;
	case VMEXIT_CPUID:
		/* FIXME: We are not intercepting CPUID now */
		return;
	case VMEXIT_VMMCALL:
		vcpu_vendor_get_execution_state(&x_state);
		vcpu_handle_hypercall(guest_regs, &x_state);
		return;
	case VMEXIT_CR0_SEL_WRITE:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
		if (svm_handle_cr(guest_regs, cpu_data))
			return;
		break;
	case VMEXIT_MSR:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
		if (!vmcb->exitinfo1)
			res = svm_handle_msr_read(guest_regs, cpu_data);
		else
			res = svm_handle_msr_write(guest_regs, cpu_data);
		if (res)
			return;
		break;
	case VMEXIT_NPF:
		if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
		    vmcb->exitinfo2 >= XAPIC_BASE &&
		    vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
			/* APIC access in non-AVIC mode */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
			if (svm_handle_apic_access(guest_regs, cpu_data))
				return;
		} else {
			/* General MMIO (IOAPIC, PCI etc) */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
			vcpu_vendor_get_pf_intercept(cpu_data, &pf);
			if (vcpu_handle_pt_violation(guest_regs, &pf))
				return;
		}

		panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
			     "error code is %x\n", vmcb->exitinfo2,
			     vmcb->exitinfo1 & 0xf);
		break;
	case VMEXIT_IOIO:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
		vcpu_vendor_get_io_intercept(cpu_data, &io);
		if (vcpu_handle_io_access(guest_regs, &io))
			return;
		break;
	/* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
	default:
		panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
			     "exitinfo1 %p exitinfo2 %p\n",
			     vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
	}
	dump_guest_regs(guest_regs, vmcb);
	panic_stop(cpu_data);
}

void vcpu_park(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
}

void vcpu_nmi_handler(struct per_cpu *cpu_data)
{
	printk("Consuming pending NMI on CPU %d\n", cpu_data->cpu_id);
}

void vcpu_tlb_flush(void)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (has_flush_by_asid)
		vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
	else
		vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}
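
/*
 * On the next VMRUN, SVM_TLB_FLUSH_GUEST only invalidates entries
 * tagged with this guest's ASID, while SVM_TLB_FLUSH_ALL drops the
 * entire TLB, including the host's entries; hence the preference for
 * the flush-by-ASID variant where the CPU supports it.
 */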

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
			      unsigned long pc, unsigned int *size)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long start;

	if (has_assists) {
		/* The CPU provides the intercepted instruction bytes */
		start = vmcb->rip - pc;
		if (start < vmcb->bytes_fetched) {
			*size = vmcb->bytes_fetched - start;
			return &vmcb->guest_bytes[start];
		}
		return NULL;
	}

	return vcpu_map_inst(pg_structs, pc, size);
}
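
/*
 * With decode assists, the VMCB already carries up to 15 bytes of the
 * instruction that caused the intercept (bytes_fetched/guest_bytes),
 * so the parser can avoid a guest page-table walk; otherwise we fall
 * back to mapping the guest code page explicitly.
 */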

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
				    struct vcpu_io_bitmap *iobm)
{
	iobm->data = cell->svm.iopm;
	iobm->size = sizeof(cell->svm.iopm);
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
	struct per_cpu *cpu_data = this_cpu_data();

	x_state->efer = cpu_data->vmcb.efer;
	x_state->rflags = cpu_data->vmcb.rflags;
	x_state->cs = cpu_data->vmcb.cs.selector;
	x_state->rip = cpu_data->vmcb.rip;
}