/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/apic.h>
#include <asm/cell.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * NW bit is ignored by all modern processors, however some
 * combinations of NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_ALLOWED_BITS	(~X86_CR0_NW)

#define MTRR_DEFTYPE		0x2ff
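
/*
 * Architectural power-on value of the PAT MSR: the eight byte-sized
 * entries encode {WB, WT, UC-, UC} twice, from PA0 through PA7.
 */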
#define PAT_RESET_VALUE		0x0007040600070406UL

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];

/* bit cleared: direct access allowed */
// TODO: convert to whitelist
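/*
 * MSR permission map (APMv2, Sect. 15.11): every MSR gets two consecutive
 * bits, the lower one intercepting reads and the upper one writes, so each
 * byte below covers four MSRs. E.g. 0x55 (01010101b) intercepts reads of
 * all four MSRs in the byte, 0x02 intercepts writes of the first one only.
 */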
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
	[ SVM_MSRPM_0000 ] = {
		[      0/4 ...  0x017/4 ] = 0,
		[  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
		[  0x01c/4 ...  0x2fb/4 ] = 0,
		[  0x2fc/4 ...  0x2ff/4 ] = 0x80, /* 0x2ff (w) */
		[  0x300/4 ...  0x7ff/4 ] = 0,
		/* x2APIC MSRs - emulated if not present */
		[  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
		[  0x804/4 ...  0x807/4 ] = 0,
		[  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
		[  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
		[  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
		[  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
		[  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
		[  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
		[  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
		[  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
		[  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
		[  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
		[  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
		[  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
		[  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
		[  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
		[  0x840/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_C000 ] = {
		[      0/4 ...  0x07f/4 ] = 0,
		[  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
		[  0x084/4 ... 0x1fff/4 ] = 0
	},
	[ SVM_MSRPM_C001 ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_RESV ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	}
};

/* This page is mapped so the code begins at 0x000ffff0 */
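/*
 * Parked CPUs spin in this cli/hlt loop; hlt resumes on events such as
 * NMIs even with interrupts disabled, and the two-byte jmp (0xeb 0xfc)
 * loops back to the cli.
 */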
static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
	[0xff0] = 0xfa, /* 1: cli */
	[0xff1] = 0xf4, /*    hlt */
	[0xff2] = 0xeb,
	[0xff3] = 0xfc  /*    jmp 1b */
};

static void *parked_mode_npt;

static void *avic_page;

static int svm_check_features(void)
{
	/* SVM is available */
	if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
		return trace_error(-ENODEV);

	/* Nested paging */
	if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
		return trace_error(-EIO);

	/* Decode assists */
	if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
		has_assists = true;

	/* AVIC */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
		has_avic = true;

	/* TLB Flush by ASID support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
		has_flush_by_asid = true;

	return 0;
}

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
				     const struct desc_table_reg *dtr)
{
	struct svm_segment tmp = { 0 };

	if (dtr) {
		tmp.base = dtr->base;
		tmp.limit = dtr->limit & 0xffff;
	}

	*svm_segment = tmp;
}

/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
					 const struct segment *segment)
{
	u32 ar;

	svm_segment->selector = segment->selector;

	/* VMX marks unusable segments with bit 16 of the access rights */
	if (segment->access_rights == 0x10000) {
		svm_segment->access_rights = 0;
	} else {
		ar = segment->access_rights;
		svm_segment->access_rights =
			((ar & 0xf000) >> 4) | (ar & 0x00ff);
	}
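
	/*
	 * Example: the VMX access-rights value 0xc09b (64-bit code segment)
	 * becomes ((0xc000 >> 4) | 0x9b) = 0x0c9b in SVM's packed format.
	 */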

	svm_segment->limit = segment->limit;
	svm_segment->base = segment->base;
}

static bool svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
	/* No real need for this function; used for consistency with vmx.c */
	vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
	vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

	return true;
}

static int vmcb_setup(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	memset(vmcb, 0, sizeof(struct vmcb));

	vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
	vmcb->cr3 = cpu_data->linux_cr3;
	vmcb->cr4 = cpu_data->linux_cr4;

	set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
	set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
	set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
	set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
	set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
	set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
	set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

	set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
	set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
	set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

	vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

	vmcb->rflags = 0x02; /* reserved bit 1 must be set */
	/* Indicate success to the caller of arch_entry */
	vmcb->rax = 0;
	vmcb->rsp = cpu_data->linux_sp +
		(NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
	vmcb->rip = cpu_data->linux_ip;
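	/*
	 * The rsp value assumes arch_entry's frame layout: linux_sp points at
	 * NUM_ENTRY_REGS saved registers followed by the return address, so
	 * skipping them yields the stack pointer Linux had before the call.
	 */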

	vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
	vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
	vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
	vmcb->star = read_msr(MSR_STAR);
	vmcb->lstar = read_msr(MSR_LSTAR);
	vmcb->cstar = read_msr(MSR_CSTAR);
	vmcb->sfmask = read_msr(MSR_SFMASK);
	vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

	vmcb->dr6 = 0x00000ff0;
	vmcb->dr7 = 0x00000400;

	/* Make the hypervisor visible */
	vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

	/* Linux uses custom PAT setting */
	vmcb->g_pat = read_msr(MSR_IA32_PAT);

	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
	/* TODO: Do we need this for SVM ? */
	/* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

	vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

	vmcb->np_enable = 1;
	/* No more than one guest owns the CPU */
	vmcb->guest_asid = 1;
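	/* (ASID 0 is architecturally reserved for the host address space) */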

	/* TODO: Setup AVIC */

	/* Explicitly mark all of the state as new */
	vmcb->clean_bits = 0;

	return svm_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
				     unsigned long gphys, unsigned long flags)
{
	return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
				gphys, flags);
}

static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
	/*
	 * See APMv2, Section 15.25.5: nested page walks are treated as user
	 * accesses, so intermediate entries always carry the US flag.
	 */
	*pte = (next_pt & 0x000ffffffffff000UL) |
		(PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}

int vcpu_vendor_init(void)
{
	struct paging_structures parking_pt;
	unsigned long vm_cr;
	int err, n;

	err = svm_check_features();
	if (err)
		return err;

	vm_cr = read_msr(MSR_VM_CR);
	if (vm_cr & VM_CR_SVMDIS)
		/* SVM disabled in BIOS */
		return trace_error(-EPERM);

	/* Nested paging is the same as the native one */
	memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
	for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
		npt_paging[n].set_next_pt = npt_set_next_pt;

	/* Map guest parking code (shared between cells and CPUs) */
	parking_pt.root_paging = npt_paging;
	parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
	if (!parked_mode_npt)
		return trace_error(-ENOMEM);
	err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
			    PAGE_SIZE, 0x000ff000,
			    PAGE_READONLY_FLAGS | PAGE_FLAG_US,
			    PAGING_NON_COHERENT);
	if (err)
		return err;
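
	/*
	 * parking_code is now guest-visible at 0x000ff000, so the halt loop
	 * sits at 0x000ffff0, exactly where svm_vcpu_reset() points CS:IP
	 * for parked CPUs.
	 */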

	/* This is always false for AMD now (except in nested SVM);
	   see Sect. 16.3.1 in APMv2 */
	if (using_x2apic) {
		/* allow direct x2APIC access except for ICR writes */
		memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
		       (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
		msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
	} else if (has_avic) {
		avic_page = page_alloc(&remap_pool, 1);
		if (!avic_page)
			return trace_error(-ENOMEM);
	}

	return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
	u64 flags;
	int err;

	/* allocate iopm (two 4-K pages + 3 bits) */
	cell->svm.iopm = page_alloc(&mem_pool, 3);
	if (!cell->svm.iopm)
		return trace_error(-ENOMEM);
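
	/*
	 * One intercept bit per port: the 64K I/O ports fill two 4K pages,
	 * and a multi-byte access to port 0xffff can spill up to 3 bits into
	 * a third page, which therefore must be allocated as well (APMv2,
	 * Sect. 15.10.1).
	 */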

	/* build root NPT of cell */
	cell->svm.npt_structs.root_paging = npt_paging;
	cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->svm.npt_structs.root_table)
		return trace_error(-ENOMEM);

	if (!has_avic) {
		/*
		 * Map xAPIC as is; reads are passed, writes are trapped.
		 */
		flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
		err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	} else {
		flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
		err = paging_create(&cell->svm.npt_structs,
				    paging_hvirt2phys(avic_page),
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	}

	return err;
}

int vcpu_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	u64 phys_start = mem->phys_start;
	u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= PAGE_FLAG_PRESENT;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= PAGE_FLAG_RW;
	if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
		flags |= PAGE_FLAG_NOEXECUTE;
	if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
		phys_start = paging_hvirt2phys(&cell->comm_page);

	return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
			     mem->virt_start, flags, PAGING_NON_COHERENT);
}

int vcpu_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
			      mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
	paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
		       PAGING_NON_COHERENT);
	page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

int vcpu_init(struct per_cpu *cpu_data)
{
	unsigned long efer;
	int err;

	err = svm_check_features();
	if (err)
		return err;

	efer = read_msr(MSR_EFER);
	if (efer & EFER_SVME)
		return trace_error(-EBUSY);

	efer |= EFER_SVME;
	write_msr(MSR_EFER, efer);

	cpu_data->svm_state = SVMON;

	if (!vmcb_setup(cpu_data))
		return trace_error(-EIO);

	/*
	 * APM Volume 2, 3.1.1: "When writing the CR0 register, software
	 * should set the values of reserved bits to the values found during
	 * the previous CR0 read."
	 * But we want to avoid surprises with new features unknown to us but
	 * set by Linux. So check if any assumed reserved bit was set and bail
	 * out if so.
	 * Note that the APM defines all reserved CR4 bits as must-be-zero.
	 */
	if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
		return -EIO;

	/* bring CR0 and CR4 into well-defined states */
	write_cr0(X86_CR0_HOST_STATE);
	write_cr4(X86_CR4_HOST_STATE);
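
	/*
	 * Tell the CPU where to save the host state on VMRUN; per the APM,
	 * the host save area must be a 4K-aligned physical page.
	 */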
	write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

	return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
	unsigned long efer;

	if (cpu_data->svm_state == SVMOFF)
		return;

	cpu_data->svm_state = SVMOFF;

	/* We are leaving - set the GIF */
	asm volatile ("stgi" : : : "memory");

	efer = read_msr(MSR_EFER);
	efer &= ~EFER_SVME;
	write_msr(MSR_EFER, efer);

	write_msr(MSR_VM_HSAVE_PA, 0);
}

void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
{
	unsigned long vmcb_pa, host_stack;

	vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
	host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);

	/*
	 * XXX: Jailhouse doesn't use PAT, so it is explicitly set to the
	 * reset value. However, this value is later combined with vmcb->g_pat
	 * (as per APMv2, Sect. 15.25.8) which may lead to subtle bugs as the
	 * actual memory type might slightly differ from what Linux expects.
	 */
	write_msr(MSR_IA32_PAT, PAT_RESET_VALUE);

	/* We enter Linux at the point arch_entry would return to as well.
	 * rax is cleared to signal success to the caller. */
	asm volatile(
		"clgi\n\t"
		"mov (%%rdi),%%r15\n\t"
		"mov 0x8(%%rdi),%%r14\n\t"
		"mov 0x10(%%rdi),%%r13\n\t"
		"mov 0x18(%%rdi),%%r12\n\t"
		"mov 0x20(%%rdi),%%rbx\n\t"
		"mov 0x28(%%rdi),%%rbp\n\t"
		"mov %0,%%rax\n\t"
		"vmload %%rax\n\t"
		"vmrun %%rax\n\t"
		"vmsave %%rax\n\t"
		/* Restore hypervisor stack */
		"mov %2,%%rsp\n\t"
		/* Assumed exit path: the stub saves guest registers and
		 * calls vcpu_handle_exit() */
		"jmp svm_vmexit"
		: /* no output */
		: "m" (vmcb_pa), "D" (cpu_data->linux_reg), "m" (host_stack)
		: "memory", "r15", "r14", "r13", "r12",
		  "rbx", "rbp", "rax", "cc");
	__builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long *stack = (unsigned long *)vmcb->rsp;
	unsigned long linux_ip = vmcb->rip;

	/* We are leaving - set the GIF */
	asm volatile ("stgi" : : : "memory");
	/*
	 * XXX: One could argue this is better to be done in
	 * arch_cpu_restore(), however, it would require changes
	 * to cpu_data to store STAR and friends.
	 */
	write_msr(MSR_STAR, vmcb->star);
	write_msr(MSR_LSTAR, vmcb->lstar);
	write_msr(MSR_CSTAR, vmcb->cstar);
	write_msr(MSR_SFMASK, vmcb->sfmask);
	write_msr(MSR_KERNGS_BASE, vmcb->kerngsbase);
	write_msr(MSR_IA32_PAT, vmcb->g_pat);

	cpu_data->linux_cr0 = vmcb->cr0;
	cpu_data->linux_cr3 = vmcb->cr3;

	cpu_data->linux_gdtr.base = vmcb->gdtr.base;
	cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
	cpu_data->linux_idtr.base = vmcb->idtr.base;
	cpu_data->linux_idtr.limit = vmcb->idtr.limit;

	cpu_data->linux_cs.selector = vmcb->cs.selector;

	cpu_data->linux_tss.selector = vmcb->tr.selector;

	cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
	cpu_data->linux_fs.base = vmcb->fs.base;
	cpu_data->linux_gs.base = vmcb->gs.base;

	cpu_data->linux_sysenter_cs = vmcb->sysenter_cs;
	cpu_data->linux_sysenter_eip = vmcb->sysenter_eip;
	cpu_data->linux_sysenter_esp = vmcb->sysenter_esp;

	cpu_data->linux_ds.selector = vmcb->ds.selector;
	cpu_data->linux_es.selector = vmcb->es.selector;
	cpu_data->linux_fs.selector = vmcb->fs.selector;
	cpu_data->linux_gs.selector = vmcb->gs.selector;

	arch_cpu_restore(cpu_data, 0);
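
	/*
	 * Put the return address Linux expects back onto Linux's own stack,
	 * then unwind the saved guest registers and return there with
	 * rax = 0 to report success.
	 */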
534 "mov %%rbx,%%rsp\n\t"
550 "mov %%rax,%%rsp\n\t"
551 "xor %%rax,%%rax\n\t"
553 : : "a" (stack), "b" (guest_regs));
554 __builtin_unreachable();

static void svm_vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long val;
	bool ok = true;

	vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
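	/* (0x60000010: the architectural reset value of CR0) */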
	vmcb->cr3 = 0;
	vmcb->cr4 = 0;

	vmcb->rflags = 0x02;

	val = 0;
	if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
		val = 0xfff0;
		sipi_vector = 0xf0;
	}
	vmcb->rip = val;
	vmcb->rsp = 0;

	vmcb->cs.selector = sipi_vector << 8;
	vmcb->cs.base = sipi_vector << 12;
	vmcb->cs.limit = 0xffff;
	vmcb->cs.access_rights = 0x009b;

	vmcb->ds.selector = 0;
	vmcb->ds.base = 0;
	vmcb->ds.limit = 0xffff;
	vmcb->ds.access_rights = 0x0093;

	vmcb->es.selector = 0;
	vmcb->es.base = 0;
	vmcb->es.limit = 0xffff;
	vmcb->es.access_rights = 0x0093;

	vmcb->fs.selector = 0;
	vmcb->fs.base = 0;
	vmcb->fs.limit = 0xffff;
	vmcb->fs.access_rights = 0x0093;

	vmcb->gs.selector = 0;
	vmcb->gs.base = 0;
	vmcb->gs.limit = 0xffff;
	vmcb->gs.access_rights = 0x0093;

	vmcb->ss.selector = 0;
	vmcb->ss.base = 0;
	vmcb->ss.limit = 0xffff;
	vmcb->ss.access_rights = 0x0093;

	vmcb->tr.selector = 0;
	vmcb->tr.base = 0;
	vmcb->tr.limit = 0xffff;
	vmcb->tr.access_rights = 0x008b;

	vmcb->ldtr.selector = 0;
	vmcb->ldtr.base = 0;
	vmcb->ldtr.limit = 0xffff;
	vmcb->ldtr.access_rights = 0x0082;

	vmcb->gdtr.selector = 0;
	vmcb->gdtr.base = 0;
	vmcb->gdtr.limit = 0xffff;
	vmcb->gdtr.access_rights = 0;

	vmcb->idtr.selector = 0;
	vmcb->idtr.base = 0;
	vmcb->idtr.limit = 0xffff;
	vmcb->idtr.access_rights = 0;

	vmcb->efer = EFER_SVME;

	/* These MSRs are undefined on reset */
	vmcb->star = 0;
	vmcb->lstar = 0;
	vmcb->cstar = 0;
	vmcb->sfmask = 0;
	vmcb->sysenter_cs = 0;
	vmcb->sysenter_eip = 0;
	vmcb->sysenter_esp = 0;
	vmcb->kerngsbase = 0;

	vmcb->g_pat = PAT_RESET_VALUE;

	vmcb->dr7 = 0x00000400;

	/* Almost all of the guest state changed */
	vmcb->clean_bits = 0;

	ok &= svm_set_cell_config(cpu_data->cell, vmcb);

	/* This is always false, but to be consistent with vmx.c... */
	if (!ok) {
		panic_printk("FATAL: CPU reset failed\n");
		panic_stop();
	}
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	vmcb->rip += inst_len;
}

static void update_efer(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer = vmcb->efer;

	/* Only act when the guest has just enabled paging with LME set */
	if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
		return;

	/* Hardware would set LMA on a real CPU; mirror that in the VMCB */
	efer |= EFER_LMA;

	/* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
	if ((vmcb->efer ^ efer) & EFER_LMA)
		vcpu_tlb_flush();

	vmcb->efer = efer;
	vmcb->clean_bits &= ~CLEAN_BITS_CRX;
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (vmcb->efer & EFER_LMA) {
		pg_structs->root_paging = x86_64_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0x000ffffffffff000UL;
	} else if ((vmcb->cr0 & X86_CR0_PG) &&
		   !(vmcb->cr4 & X86_CR4_PAE)) {
		pg_structs->root_paging = i386_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0xfffff000UL;
	} else if (!(vmcb->cr0 & X86_CR0_PG)) {
		/*
		 * Can be in non-paged protected mode as well, but
		 * the translation mechanism will stay the same anyway.
		 */
		pg_structs->root_paging = realmode_paging;
		/*
		 * This will make paging_get_guest_pages map the page
		 * that also contains the bootstrap code and, thus, is
		 * always present in a cell.
		 */
		pg_structs->root_table_gphys = 0xff000;
	} else {
		/* Paging is on, but neither LMA nor plain 32-bit (PAE) */
		printk("FATAL: Unsupported paging mode\n");
		return false;
	}

	return true;
}

struct parse_context {
	unsigned int remaining;
	unsigned int size;
	unsigned long cs_base;
	const u8 *inst;
};

static bool ctx_advance(struct parse_context *ctx,
			unsigned long *pc,
			struct guest_paging_structures *pg_structs)
{
	if (!ctx->size) {
		/* Map the next chunk of guest instruction bytes */
		ctx->size = ctx->remaining;
		ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
					  &ctx->size);
		if (!ctx->inst)
			return false;
		ctx->remaining -= ctx->size;
		*pc += ctx->size;
	}
	return true;
}

static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data,
				unsigned long pc,
				unsigned char reg,
				unsigned long *gpr)
{
	struct guest_paging_structures pg_structs;
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct parse_context ctx = {};
	/* No prefixes are supported yet */
	u8 opcodes[] = {0x0f, 0x22}, modrm;
	int n;

	ctx.remaining = ARRAY_SIZE(opcodes);
	if (!vcpu_get_guest_paging_structs(&pg_structs))
		return false;
	ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;

	if (!ctx_advance(&ctx, &pc, &pg_structs))
		return false;

	for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++, ctx.size--) {
		if (*(ctx.inst) != opcodes[n])
			return false;
		if (!ctx_advance(&ctx, &pc, &pg_structs))
			return false;
	}

	if (!ctx_advance(&ctx, &pc, &pg_structs))
		return false;

	/* ModRM byte: the reg field selects the CR, rm names the GPR */
	modrm = *(ctx.inst);

	if (((modrm & 0x38) >> 3) != reg)
		return false;

	*gpr = (modrm & 0x7);

	return true;
}

/*
 * XXX: The only visible reason to have this function (vmx.c consistency
 * aside) is to prevent cells from setting invalid CD+NW combinations that
 * result in no more than VMEXIT_INVALID. Maybe we can get along without it
 * altogether?
 */
static bool svm_handle_cr(struct registers *guest_regs,
			  struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	/* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
	unsigned long reg = -1, val, bits;
	bool ok = true;

	if (has_assists) {
		/* Decode assists provide the MOV-CR details in exitinfo1 */
		if (!(vmcb->exitinfo1 & (1UL << 63))) {
			panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
			ok = false;
			goto out;
		}
		reg = vmcb->exitinfo1 & 0x07;
	} else {
		if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, &reg)) {
			panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
			ok = false;
			goto out;
		}
	}

	/* The rsp slot is not part of guest_regs; fetch it from the VMCB */
	if (reg == 4)
		val = vmcb->rsp;
	else
		val = ((unsigned long *)guest_regs)[15 - reg];

	vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
	/* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
	bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
	if ((val ^ vmcb->cr0) & bits)
		vcpu_tlb_flush();
	/* TODO: better check for #GP reasons */
	vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
	if (val & X86_CR0_PG)
		update_efer(cpu_data);
	vmcb->clean_bits &= ~CLEAN_BITS_CRX;

out:
	return ok;
}

static bool svm_handle_msr_read(struct registers *guest_regs,
				struct per_cpu *cpu_data)
{
	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
		x2apic_handle_read(guest_regs);
		return true;
	}

	panic_printk("FATAL: Unhandled MSR read: %x\n",
		     guest_regs->rcx);
	return false;
}

static bool svm_handle_msr_write(struct registers *guest_regs,
				 struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer, val;
	bool result = true;

	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		result = x2apic_handle_write(guest_regs, cpu_data);
		goto out;
	}
	if (guest_regs->rcx == MSR_EFER) {
		/* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
		efer = (guest_regs->rax & 0xffffffff) |
			(guest_regs->rdx << 32) | EFER_SVME;
		/* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
		if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
			vcpu_tlb_flush();
		vmcb->efer = efer;
		vmcb->clean_bits &= ~CLEAN_BITS_CRX;
		goto out;
	}
	if (guest_regs->rcx == MTRR_DEFTYPE) {
		val = (guest_regs->rax & 0xffffffff) | (guest_regs->rdx << 32);
		/*
		 * Quick (and very incomplete) guest MTRRs emulation.
		 *
		 * For Linux, emulating MTRR Enable bit seems to be enough.
		 * If it is cleared, we set hPAT to all zeroes, effectively
		 * making all NPT-mapped memory UC (see APMv2, Sect. 15.25.8).
		 *
		 * Otherwise, default PAT value is restored. This can also
		 * make NPT-mapped memory's type different from what Linux
		 * expects, however.
		 */
		if (val & 0x800)	/* MTRR Enable (E) bit */
			write_msr(MSR_IA32_PAT, PAT_RESET_VALUE);
		else
			write_msr(MSR_IA32_PAT, 0);
		goto out;
	}

	panic_printk("FATAL: Unhandled MSR write: %x\n",
		     guest_regs->rcx);
	result = false;

out:
	vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
	return result;
}

/*
 * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
 * be treated separately in svm_handle_avic_access().
 */
static bool svm_handle_apic_access(struct registers *guest_regs,
				   struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct guest_paging_structures pg_structs;
	unsigned int inst_len, offset;
	bool is_write;

	/* The caller is responsible for sanity checks */
	is_write = !!(vmcb->exitinfo1 & 0x2);
	offset = vmcb->exitinfo2 - XAPIC_BASE;
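
	/* xAPIC registers sit on 16-byte boundaries, hence the ">> 4" below */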
	if (offset & 0x00f)
		goto out_err;

	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto out_err;

	inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
				    &pg_structs, offset >> 4, is_write);
	if (!inst_len)
		goto out_err;

	vcpu_skip_emulated_instruction(inst_len);
	return true;

out_err:
	panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
		     offset, is_write);
	return false;
}

static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
{
	panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
		     vmcb->rsp, vmcb->rflags);
	panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
		     guest_regs->rbx, guest_regs->rcx);
	panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
		     guest_regs->rsi, guest_regs->rdi);
	panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
		     vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
		     !!(vmcb->efer & EFER_LMA));
	panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
		     vmcb->cr3, vmcb->cr4);
	panic_printk("EFER: %p\n", vmcb->efer);
}

static void svm_get_vcpu_pf_intercept(struct per_cpu *cpu_data,
				      struct vcpu_pf_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	out->phys_addr = vmcb->exitinfo2;
	out->is_write = !!(vmcb->exitinfo1 & 0x2);
}

static void svm_get_vcpu_io_intercept(struct per_cpu *cpu_data,
				      struct vcpu_io_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	u64 exitinfo = vmcb->exitinfo1;

	/* parse exit info for I/O instructions (see APM, 15.10.2) */
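	/*
	 * exitinfo1 layout: bit 0 = direction (1 = IN), bits 2/3 = string/REP
	 * prefixes, bits 4-6 = one-hot operand size (1/2/4 bytes), bits
	 * 16-31 = port number; exitinfo2 holds the rIP of the next
	 * instruction.
	 */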
	out->port = (exitinfo >> 16) & 0xFFFF;
	out->size = (exitinfo >> 4) & 0x7;
	out->in = !!(exitinfo & 0x1);
	out->inst_len = vmcb->exitinfo2 - vmcb->rip;
	out->rep_or_str = !!(exitinfo & 0x0c);
}

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct vcpu_execution_state x_state;
	struct vcpu_pf_intercept pf;
	struct vcpu_io_intercept io;
	bool res = false;
	int sipi_vector;

	/* Restore GS value expected by per_cpu data accessors */
	write_msr(MSR_GS_BASE, (unsigned long)cpu_data);

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
	/*
	 * All guest state is marked unmodified; individual handlers must clear
	 * the bits as needed.
	 */
	vmcb->clean_bits = 0xffffffff;

	switch (vmcb->exitcode) {
	case VMEXIT_INVALID:
		panic_printk("FATAL: VM-Entry failure, error %d\n",
			     vmcb->exitcode);
		break;
	case VMEXIT_NMI:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
		/* Temporarily enable GIF to consume pending NMI */
		asm volatile("stgi; clgi" : : : "memory");
		sipi_vector = x86_handle_events(cpu_data);
		if (sipi_vector >= 0) {
			printk("CPU %d received SIPI, vector %x\n",
			       cpu_data->cpu_id, sipi_vector);
			svm_vcpu_reset(cpu_data, sipi_vector);
			memset(guest_regs, 0, sizeof(*guest_regs));
		}
		iommu_check_pending_faults(cpu_data);
		return;
	case VMEXIT_CPUID:
		/* FIXME: We are not intercepting CPUID now */
		return;
	case VMEXIT_VMMCALL:
		vcpu_vendor_get_execution_state(&x_state);
		vcpu_handle_hypercall(guest_regs, &x_state);
		return;
	case VMEXIT_CR0_SEL_WRITE:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
		if (svm_handle_cr(guest_regs, cpu_data))
			return;
		break;
	case VMEXIT_MSR:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
		if (!vmcb->exitinfo1)
			res = svm_handle_msr_read(guest_regs, cpu_data);
		else
			res = svm_handle_msr_write(guest_regs, cpu_data);
		if (res)
			return;
		break;
	case VMEXIT_NPF:
		if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
		    vmcb->exitinfo2 >= XAPIC_BASE &&
		    vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
			/* APIC access in non-AVIC mode */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
			if (svm_handle_apic_access(guest_regs, cpu_data))
				return;
		} else {
			/* General MMIO (IOAPIC, PCI etc) */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
			svm_get_vcpu_pf_intercept(cpu_data, &pf);
			if (vcpu_handle_pt_violation(guest_regs, &pf))
				return;
		}

		panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
			     "error code is %x\n", vmcb->exitinfo2,
			     vmcb->exitinfo1 & 0xf);
		break;
	case VMEXIT_XSETBV:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
		if ((guest_regs->rax & X86_XCR0_FP) &&
		    (guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
		    guest_regs->rcx == 0 && guest_regs->rdx == 0) {
			vcpu_skip_emulated_instruction(X86_INST_LEN_XSETBV);
			asm volatile(
				"xsetbv"
				: /* no output */
				: "a" (guest_regs->rax), "c" (0), "d" (0));
			return;
		}
		panic_printk("FATAL: Invalid xsetbv parameters: "
			     "xcr[%d] = %x:%x\n", guest_regs->rcx,
			     guest_regs->rdx, guest_regs->rax);
		break;
	case VMEXIT_IOIO:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
		svm_get_vcpu_io_intercept(cpu_data, &io);
		if (vcpu_handle_io_access(guest_regs, &io))
			return;
		break;
	/* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
	default:
		panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
			     "exitinfo1 %p exitinfo2 %p\n",
			     vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
	}
	dump_guest_regs(guest_regs, vmcb);
	panic_park();
}

void vcpu_park(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	svm_vcpu_reset(cpu_data, APIC_BSP_PSEUDO_SIPI);
	/* No need to clear VMCB Clean bit: svm_vcpu_reset() already does this */
	vmcb->n_cr3 = paging_hvirt2phys(parked_mode_npt);

	vcpu_tlb_flush();
}

void vcpu_nmi_handler(void)
{
}

void vcpu_tlb_flush(void)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (has_flush_by_asid)
		vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
	else
		vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
			      unsigned long pc, unsigned int *size)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long start;
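
	/*
	 * With decode assists, the hardware already copied up to 15 bytes of
	 * the faulting instruction into the VMCB on #VMEXIT, so the guest
	 * page tables need not be walked again.
	 */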
	if (has_assists) {
		start = vmcb->rip - pc;
		if (start < vmcb->bytes_fetched) {
			*size = vmcb->bytes_fetched - start;
			return &vmcb->guest_bytes[start];
		}
		return NULL;
	}

	return vcpu_map_inst(pg_structs, pc, size);
}

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
				    struct vcpu_io_bitmap *iobm)
{
	iobm->data = cell->svm.iopm;
	/* iopm is a pointer, so sizeof() would be wrong: it spans 3 pages
	 * (see the allocation in vcpu_vendor_cell_init()) */
	iobm->size = 3 * PAGE_SIZE;
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
	struct per_cpu *cpu_data = this_cpu_data();

	x_state->efer = cpu_data->vmcb.efer;
	x_state->rflags = cpu_data->vmcb.rflags;
	x_state->cs = cpu_data->vmcb.cs.selector;
	x_state->rip = cpu_data->vmcb.rip;
}

/* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
void enable_irq(void)
{
	asm volatile("stgi; sti" : : : "memory");
}

/* Jailhouse runs with GIF cleared, so we need to restore this state */
void disable_irq(void)
{
	asm volatile("cli; clgi" : : : "memory");
}