/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/amd_iommu.h>
#include <asm/apic.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * NW bit is ignored by all modern processors, however some
 * combinations of NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_ALLOWED_BITS            (~X86_CR0_NW)

/* IOPM size: two 4-K pages + 3 bits */
#define IOPM_PAGES                      3

#define NPT_IOMMU_PAGE_DIR_LEVELS       4

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_iommu_paging[NPT_IOMMU_PAGE_DIR_LEVELS];
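
/*
 * MSR permission map format (APMv2, Sect. 15.11): each MSR is covered
 * by two consecutive bits, read intercept first, then write intercept,
 * so one byte describes four MSRs. E.g. 0xaa (10101010b) intercepts
 * writes to all four MSRs of a byte, 0x55 intercepts all reads.
 */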

/* bit cleared: direct access allowed */
// TODO: convert to whitelist
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
        [ SVM_MSRPM_0000 ] = {
                [ 0/4 ... 0x017/4 ] = 0,
                [ 0x018/4 ... 0x01b/4 ] = 0x80, /* 0x01b (w) */
                [ 0x01c/4 ... 0x1ff/4 ] = 0,
                [ 0x200/4 ... 0x273/4 ] = 0xaa, /* 0x200 - 0x273 (w) */
                [ 0x274/4 ... 0x277/4 ] = 0xea, /* 0x274 - 0x276 (w), 0x277 (rw) */
                [ 0x278/4 ... 0x2fb/4 ] = 0,
                [ 0x2fc/4 ... 0x2ff/4 ] = 0x80, /* 0x2ff (w) */
                [ 0x300/4 ... 0x7ff/4 ] = 0,
                /* x2APIC MSRs - emulated if not present */
                [ 0x800/4 ... 0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
                [ 0x804/4 ... 0x807/4 ] = 0,
                [ 0x808/4 ... 0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
                [ 0x80c/4 ... 0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
                [ 0x810/4 ... 0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
                [ 0x814/4 ... 0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
                [ 0x818/4 ... 0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
                [ 0x81c/4 ... 0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
                [ 0x820/4 ... 0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
                [ 0x824/4 ... 0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
                [ 0x828/4 ... 0x82b/4 ] = 0x03, /* 0x828 (rw) */
                [ 0x82c/4 ... 0x82f/4 ] = 0xc0, /* 0x82f (rw) */
                [ 0x830/4 ... 0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
                [ 0x834/4 ... 0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
                [ 0x838/4 ... 0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
                [ 0x83c/4 ... 0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
                [ 0x840/4 ... 0x1fff/4 ] = 0,
        },
        [ SVM_MSRPM_C000 ] = {
                [ 0/4 ... 0x07f/4 ] = 0,
                [ 0x080/4 ... 0x083/4 ] = 0x02, /* 0x080 (w) */
                [ 0x084/4 ... 0x1fff/4 ] = 0
        },
        [ SVM_MSRPM_C001 ] = {
                [ 0/4 ... 0x1fff/4 ] = 0,
        },
        [ SVM_MSRPM_RESV ] = {
                [ 0/4 ... 0x1fff/4 ] = 0,
        }
};

/* This page is mapped so the code begins at 0x000ffff0 */
static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
        [0xff0] = 0xfa, /* 1: cli */
        [0xff1] = 0xf4, /*    hlt */
        [0xff2] = 0xeb,
        [0xff3] = 0xfc  /*    jmp 1b */
};
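
/*
 * Note: vcpu_park() resets a CPU to the pseudo reset vector (CS base
 * 0xf0000, rIP 0xfff0), i.e. linear address 0x000ffff0 - exactly where
 * the loop above sits in the page mapped at 0x000ff000.
 */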

static void *parked_mode_npt;

static void *avic_page;

static int svm_check_features(void)
{
        /* SVM is available */
        if (!(cpuid_ecx(0x80000001, 0) & X86_FEATURE_SVM))
                return trace_error(-ENODEV);

        /* Nested paging */
        if (!(cpuid_edx(0x8000000A, 0) & X86_FEATURE_NP))
                return trace_error(-EIO);

        /* Decode assists */
        if ((cpuid_edx(0x8000000A, 0) & X86_FEATURE_DECODE_ASSISTS))
                has_assists = true;

        /* FIXME: Jailhouse support is incomplete so far
        if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_AVIC)
                has_avic = true; */

        /* TLB Flush by ASID support */
        if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_FLUSH_BY_ASID)
                has_flush_by_asid = true;

        return 0;
}

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
                                     const struct desc_table_reg *dtr)
{
        svm_segment->base = dtr->base;
        svm_segment->limit = dtr->limit & 0xffff;
}
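
/*
 * The VMCB stores segment attributes in a packed 12-bit format: bits
 * 7:0 hold descriptor bits 47:40 (type, S, DPL, P) and bits 11:8 hold
 * descriptor bits 55:52 (AVL, L, D/B, G). The shift below drops the
 * unused limit bits 19:16 that sit in between in the generic format.
 */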
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
                                         const struct segment *segment)
{
        svm_segment->selector = segment->selector;
        svm_segment->access_rights = ((segment->access_rights & 0xf000) >> 4) |
                (segment->access_rights & 0x00ff);
        svm_segment->limit = segment->limit;
        svm_segment->base = segment->base;
}

static void svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
        vmcb->iopm_base_pa = paging_hvirt2phys(cell->arch.svm.iopm);
        vmcb->n_cr3 =
                paging_hvirt2phys(cell->arch.svm.npt_iommu_structs.root_table);
}

static void vmcb_setup(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;

        memset(vmcb, 0, sizeof(struct vmcb));

        vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
        vmcb->cr3 = cpu_data->linux_cr3;
        vmcb->cr4 = cpu_data->linux_cr4;

        set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
        set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
        set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
        set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
        set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
        set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
        set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
        set_svm_segment_from_segment(&vmcb->ldtr, &invalid_seg);

        set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
        set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

        vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

        vmcb->rflags = 0x02;
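
        /*
         * arch_entry pushed NUM_ENTRY_REGS registers after the return
         * address; advancing rSP past both lets the guest resume as if
         * arch_entry had returned regularly to linux_ip.
         */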
        /* Indicate success to the caller of arch_entry */
        vmcb->rax = 0;
        vmcb->rsp = cpu_data->linux_sp +
                (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
        vmcb->rip = cpu_data->linux_ip;

        vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
        vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
        vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
        vmcb->star = read_msr(MSR_STAR);
        vmcb->lstar = read_msr(MSR_LSTAR);
        vmcb->cstar = read_msr(MSR_CSTAR);
        vmcb->sfmask = read_msr(MSR_SFMASK);
        vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

        vmcb->dr6 = 0x00000ff0;
        vmcb->dr7 = 0x00000400;

        /* Make the hypervisor visible */
        vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

        vmcb->g_pat = cpu_data->pat;

        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

        /*
         * We only intercept #DB and #AC to prevent malicious guests from
         * triggering infinite loops in microcode (see e.g. CVE-2015-5307
         * and CVE-2015-8104).
         */
        vmcb->exception_intercepts |= (1 << DB_VECTOR) | (1 << AC_VECTOR);

        vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

        vmcb->np_enable = 1;
        /* No more than one guest owns the CPU */
        vmcb->guest_asid = 1;

        /* TODO: Setup AVIC */

        /* Explicitly mark all of the state as new */
        vmcb->clean_bits = 0;

        svm_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
                                     unsigned long gphys, unsigned long flags)
{
        return paging_virt2phys(&cpu_data->cell->arch.svm.npt_iommu_structs,
                                gphys, flags);
}

static void npt_iommu_set_next_pt_l4(pt_entry_t pte, unsigned long next_pt)
{
        /*
         * Merge IOMMU and NPT flags. We need to mark the NPT entries as user
         * accessible, see APMv2, Section 15.25.5.
         */
        *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(3) |
                AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
                PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
}

static void npt_iommu_set_next_pt_l3(pt_entry_t pte, unsigned long next_pt)
{
        *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(2) |
                AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
                PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
}

static void npt_iommu_set_next_pt_l2(pt_entry_t pte, unsigned long next_pt)
{
        *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(1) |
                AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
                PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
}
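
/*
 * In the combined NPT/IOMMU table, a non-zero page-mode (next-level)
 * field marks a pointer to a lower-level table; only entries with a
 * zero field terminate the walk, here as 1G or 2M pages.
 */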
static unsigned long npt_iommu_get_phys_l3(pt_entry_t pte, unsigned long virt)
{
        if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
                return INVALID_PHYS_ADDR;
        return (*pte & BIT_MASK(51, 30)) | (virt & BIT_MASK(29, 0));
}

static unsigned long npt_iommu_get_phys_l2(pt_entry_t pte, unsigned long virt)
{
        if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
                return INVALID_PHYS_ADDR;
        return (*pte & BIT_MASK(51, 21)) | (virt & BIT_MASK(20, 0));
}

int vcpu_vendor_init(void)
{
        struct paging_structures parking_pt;
        unsigned long vm_cr;
        int err;

        err = svm_check_features();
        if (err)
                return err;

        vm_cr = read_msr(MSR_VM_CR);
        if (vm_cr & VM_CR_SVMDIS)
                /* SVM disabled in BIOS */
                return trace_error(-EPERM);

        /*
         * Nested paging is almost the same as the native one. However, we
         * need to override some handlers in order to reuse the page table
         * for the IOMMU as well.
         */
        memcpy(npt_iommu_paging, x86_64_paging, sizeof(npt_iommu_paging));
        npt_iommu_paging[0].set_next_pt = npt_iommu_set_next_pt_l4;
        npt_iommu_paging[1].set_next_pt = npt_iommu_set_next_pt_l3;
        npt_iommu_paging[2].set_next_pt = npt_iommu_set_next_pt_l2;
        npt_iommu_paging[1].get_phys = npt_iommu_get_phys_l3;
        npt_iommu_paging[2].get_phys = npt_iommu_get_phys_l2;

        /* Map guest parking code (shared between cells and CPUs) */
        parking_pt.root_paging = npt_iommu_paging;
        parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
        if (!parked_mode_npt)
                return trace_error(-ENOMEM);
        err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
                            PAGE_SIZE, 0x000ff000,
                            PAGE_READONLY_FLAGS | PAGE_FLAG_US,
                            PAGING_NON_COHERENT);
        if (err)
                return err;

        /* This is always false for AMD now (except in nested SVM);
           see Sect. 16.3.1 in APMv2 */
        if (using_x2apic) {
                /* allow direct x2APIC access except for ICR writes */
                memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
                       (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
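                /*
                 * MSR 0x830 (ICR) occupies the low two bits of its permission
                 * byte, so 0x02 traps only ICR writes and leaves reads direct.
                 */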
                msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
        } else if (has_avic) {
                avic_page = page_alloc(&remap_pool, 1);
                if (!avic_page)
                        return trace_error(-ENOMEM);
        }

        return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
        int err = -ENOMEM;
        u64 flags;

        /* allocate iopm */
        cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES);
        if (!cell->arch.svm.iopm)
                return err;

        /* build root NPT of cell */
        cell->arch.svm.npt_iommu_structs.root_paging = npt_iommu_paging;
        cell->arch.svm.npt_iommu_structs.root_table =
                (page_table_t)cell->arch.root_table_page;

        if (!has_avic) {
                /*
                 * Map xAPIC as is; reads are passed, writes are trapped.
                 */
                flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
                err = paging_create(&cell->arch.svm.npt_iommu_structs,
                                    XAPIC_BASE, PAGE_SIZE, XAPIC_BASE,
                                    flags, PAGING_NON_COHERENT);
        } else {
                flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
                err = paging_create(&cell->arch.svm.npt_iommu_structs,
                                    paging_hvirt2phys(avic_page),
                                    PAGE_SIZE, XAPIC_BASE,
                                    flags, PAGING_NON_COHERENT);
        }
        if (err)
                goto err_free_iopm;

        return 0;

err_free_iopm:
        page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);

        return err;
}

int vcpu_map_memory_region(struct cell *cell,
                           const struct jailhouse_memory *mem)
{
        u64 phys_start = mem->phys_start;
        u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

        if (mem->flags & JAILHOUSE_MEM_READ)
                flags |= PAGE_FLAG_PRESENT;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= PAGE_FLAG_RW;
        if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
                flags |= PAGE_FLAG_NOEXECUTE;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);

        flags |= amd_iommu_get_memory_region_flags(mem);

        /*
         * As we also manipulate the IOMMU page table, changes need to be
         * coherent.
         */
        return paging_create(&cell->arch.svm.npt_iommu_structs, phys_start,
                             mem->size, mem->virt_start, flags,
                             PAGING_COHERENT);
}

int vcpu_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
{
        return paging_destroy(&cell->arch.svm.npt_iommu_structs,
                              mem->virt_start, mem->size, PAGING_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
        paging_destroy(&cell->arch.svm.npt_iommu_structs, XAPIC_BASE,
                       PAGE_SIZE, PAGING_NON_COHERENT);
        page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
}

int vcpu_init(struct per_cpu *cpu_data)
{
        unsigned long efer;
        int err;

        err = svm_check_features();
        if (err)
                return err;

        efer = read_msr(MSR_EFER);
        if (efer & EFER_SVME)
                return trace_error(-EBUSY);

        efer |= EFER_SVME;
        write_msr(MSR_EFER, efer);

        cpu_data->svm_state = SVMON;

        vmcb_setup(cpu_data);

        /*
         * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
         * set the values of reserved bits to the values found during the
         * previous CR0 read."
         * But we want to avoid surprises with new features unknown to us but
         * set by Linux. So check if any assumed reserved bit was set and bail
         * out if so in order to notice.
         * Note that the APM defines all reserved CR4 bits as must-be-zero.
         */
        if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
                return trace_error(-EIO);

        /* bring CR0 and CR4 into well-defined states */
        write_cr0(X86_CR0_HOST_STATE);
        write_cr4(X86_CR4_HOST_STATE);

        write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

        return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
        unsigned long efer;

        if (cpu_data->svm_state == SVMOFF)
                return;

        cpu_data->svm_state = SVMOFF;

        /* We are leaving - set the GIF */
        asm volatile ("stgi" : : : "memory");

        efer = read_msr(MSR_EFER);
        efer &= ~EFER_SVME;
        write_msr(MSR_EFER, efer);

        write_msr(MSR_VM_HSAVE_PA, 0);
}

void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
{
        unsigned long vmcb_pa, host_stack;

        vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
        host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);

        /* We enter Linux at the point arch_entry would return to as well.
         * rax is cleared to signal success to the caller. */
        asm volatile(
                "clgi\n\t"
                "mov (%%rdi),%%r15\n\t"
                "mov 0x8(%%rdi),%%r14\n\t"
                "mov 0x10(%%rdi),%%r13\n\t"
                "mov 0x18(%%rdi),%%r12\n\t"
                "mov 0x20(%%rdi),%%rbx\n\t"
                "mov 0x28(%%rdi),%%rbp\n\t"
                "mov %2,%%rsp\n\t"
                "vmload %%rax\n\t"
                "jmp svm_vmentry"
                : /* no output */
                : "D" (cpu_data->linux_reg), "a" (vmcb_pa), "m" (host_stack));
        __builtin_unreachable();
}

void __attribute__((noreturn)) vcpu_deactivate_vmm(void)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long *stack = (unsigned long *)vmcb->rsp;
        unsigned long linux_ip = vmcb->rip;

        cpu_data->linux_cr0 = vmcb->cr0;
        cpu_data->linux_cr3 = vmcb->cr3;

        cpu_data->linux_gdtr.base = vmcb->gdtr.base;
        cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
        cpu_data->linux_idtr.base = vmcb->idtr.base;
        cpu_data->linux_idtr.limit = vmcb->idtr.limit;

        cpu_data->linux_cs.selector = vmcb->cs.selector;

        asm volatile("str %0" : "=m" (cpu_data->linux_tss.selector));

        cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
        cpu_data->linux_fs.base = read_msr(MSR_FS_BASE);
        cpu_data->linux_gs.base = vmcb->gs.base;

        cpu_data->linux_ds.selector = vmcb->ds.selector;
        cpu_data->linux_es.selector = vmcb->es.selector;

        asm volatile("mov %%fs,%0" : "=m" (cpu_data->linux_fs.selector));
        asm volatile("mov %%gs,%0" : "=m" (cpu_data->linux_gs.selector));

        arch_cpu_restore(cpu_data, 0);

        stack--;
        *stack = linux_ip;
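
        /*
         * Switch to the guest_regs save area (rbx), pop the GPRs in push
         * order while skipping the unused rSP slot, then load the prepared
         * Linux stack from rax and "return" to the linux_ip pushed above.
         */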
        asm volatile (
                "mov %%rbx,%%rsp\n\t"
                "pop %%r15\n\t"
                "pop %%r14\n\t"
                "pop %%r13\n\t"
                "pop %%r12\n\t"
                "pop %%r11\n\t"
                "pop %%r10\n\t"
                "pop %%r9\n\t"
                "pop %%r8\n\t"
                "pop %%rdi\n\t"
                "pop %%rsi\n\t"
                "pop %%rbp\n\t"
                "add $8,%%rsp\n\t"
                "pop %%rbx\n\t"
                "pop %%rdx\n\t"
                "pop %%rcx\n\t"
                "mov %%rax,%%rsp\n\t"
                "xor %%rax,%%rax\n\t"
                "ret"
                : : "a" (stack), "b" (&cpu_data->guest_regs));
        __builtin_unreachable();
}

void vcpu_vendor_reset(unsigned int sipi_vector)
{
        static const struct svm_segment dataseg_reset_state = {
                .selector = 0,
                .base = 0,
                .limit = 0xffff,
                .access_rights = 0x0093,
        };
        static const struct svm_segment dtr_reset_state = {
                .selector = 0,
                .base = 0,
                .limit = 0xffff,
                .access_rights = 0,
        };
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long val;

        vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
        vmcb->cr3 = 0;
        vmcb->cr4 = 0;

        vmcb->rflags = 0x02;

        val = 0;
        if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
                val = 0xfff0;
                sipi_vector = 0xf0;
        }
        vmcb->rip = val;
        vmcb->rsp = 0;

        vmcb->cs.selector = sipi_vector << 8;
        vmcb->cs.base = sipi_vector << 12;
        vmcb->cs.limit = 0xffff;
        vmcb->cs.access_rights = 0x009b;

        vmcb->ds = dataseg_reset_state;
        vmcb->es = dataseg_reset_state;
        vmcb->fs = dataseg_reset_state;
        vmcb->gs = dataseg_reset_state;
        vmcb->ss = dataseg_reset_state;

        vmcb->tr.selector = 0;
        vmcb->tr.base = 0;
        vmcb->tr.limit = 0xffff;
        vmcb->tr.access_rights = 0x008b;

        vmcb->ldtr.selector = 0;
        vmcb->ldtr.base = 0;
        vmcb->ldtr.limit = 0xffff;
        vmcb->ldtr.access_rights = 0x0082;

        vmcb->gdtr = dtr_reset_state;
        vmcb->idtr = dtr_reset_state;

        vmcb->efer = EFER_SVME;

        /* These MSRs are undefined on reset */
        vmcb->star = 0;
        vmcb->lstar = 0;
        vmcb->cstar = 0;
        vmcb->sfmask = 0;
        vmcb->sysenter_cs = 0;
        vmcb->sysenter_eip = 0;
        vmcb->sysenter_esp = 0;
        vmcb->kerngsbase = 0;

        vmcb->dr7 = 0x00000400;

        vmcb->eventinj = 0;

        /* Almost all of the guest state changed */
        vmcb->clean_bits = 0;

        svm_set_cell_config(cpu_data->cell, vmcb);

        asm volatile(
                "vmload %%rax"
                : : "a" (paging_hvirt2phys(vmcb)) : "memory");
        /* vmload overwrites GS_BASE - restore the host state */
        write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
        this_cpu_data()->vmcb.rip += inst_len;
}
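
/*
 * CR0 writes are emulated in svm_handle_cr(), so the CPU's implicit
 * EFER.LMA update on long mode activation never happens for the guest.
 * Mirror it here: set LMA once LME is on and paging gets enabled.
 */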
static void update_efer(struct vmcb *vmcb)
{
        unsigned long efer = vmcb->efer;

        if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
                return;

        efer |= EFER_LMA;

        /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
        if ((vmcb->efer ^ efer) & EFER_LMA)
                vcpu_tlb_flush();

        vmcb->efer = efer;
        vmcb->clean_bits &= ~CLEAN_BITS_CRX;
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;

        if (vmcb->efer & EFER_LMA) {
                pg_structs->root_paging = x86_64_paging;
                pg_structs->root_table_gphys = vmcb->cr3 & BIT_MASK(51, 12);
        } else if ((vmcb->cr0 & X86_CR0_PG) &&
                   !(vmcb->cr4 & X86_CR4_PAE)) {
                pg_structs->root_paging = i386_paging;
                pg_structs->root_table_gphys = vmcb->cr3 & BIT_MASK(31, 12);
        } else if (!(vmcb->cr0 & X86_CR0_PG)) {
                /*
                 * Can be in non-paged protected mode as well, but
                 * the translation mechanism will stay the same anyway.
                 */
                pg_structs->root_paging = realmode_paging;
                /*
                 * This will make paging_get_guest_pages map the page
                 * that also contains the bootstrap code and, thus, is
                 * always present in a cell.
                 */
                pg_structs->root_table_gphys = 0xff000;
        } else {
                printk("FATAL: Unsupported paging mode\n");
                return false;
        }
        return true;
}

void vcpu_vendor_set_guest_pat(unsigned long val)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;

        vmcb->g_pat = val;
        vmcb->clean_bits &= ~CLEAN_BITS_NP;
}

struct parse_context {
        unsigned int remaining;
        unsigned int size;
        unsigned long cs_base;
        const u8 *inst;
};

static bool ctx_advance(struct parse_context *ctx,
                        unsigned long *pc,
                        struct guest_paging_structures *pg_structs)
{
        if (!ctx->size) {
                ctx->size = ctx->remaining;
                ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
                                          &ctx->size);
                if (!ctx->inst)
                        return false;
                ctx->remaining -= ctx->size;
                *pc += ctx->size;
        }
        return true;
}
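
/*
 * Without decode assists only the faulting rIP is available, so fetch
 * the instruction from guest memory by hand: MOV-to-CR is 0x0f 0x22,
 * ModRM bits 5:3 select the control register, bits 2:0 the source GPR.
 */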
static bool svm_parse_mov_to_cr(struct vmcb *vmcb, unsigned long pc,
                                unsigned char reg, unsigned long *gpr)
{
        struct guest_paging_structures pg_structs;
        struct parse_context ctx = {};
        /* No prefixes are supported yet */
        u8 opcodes[] = {0x0f, 0x22}, modrm;
        int n;

        ctx.remaining = ARRAY_SIZE(opcodes);
        if (!vcpu_get_guest_paging_structs(&pg_structs))
                return false;
        ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;

        if (!ctx_advance(&ctx, &pc, &pg_structs))
                return false;

        for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++)
                if (*(ctx.inst) != opcodes[n] ||
                    !ctx_advance(&ctx, &pc, &pg_structs))
                        return false;

        if (!ctx_advance(&ctx, &pc, &pg_structs))
                return false;

        modrm = *(ctx.inst);

        if (((modrm & 0x38) >> 3) != reg)
                return false;

        *gpr = (modrm & 0x7);

        return true;
}

/*
 * XXX: The only visible reason to have this function (vmx.c consistency
 * aside) is to prevent cells from setting invalid CD+NW combinations that
 * result in no more than VMEXIT_INVALID. Maybe we can get along without it
 * altogether?
 */
static bool svm_handle_cr(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        /* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
        unsigned long reg = -1, val, bits;

        if (has_assists) {
                if (!(vmcb->exitinfo1 & (1UL << 63))) {
                        panic_printk("FATAL: Unsupported CR access "
                                     "(LMSW or CLTS)\n");
                        return false;
                }
                reg = vmcb->exitinfo1 & 0x07;
        } else {
                if (!svm_parse_mov_to_cr(vmcb, vmcb->rip, 0, &reg)) {
                        panic_printk("FATAL: Unable to parse MOV-to-CR "
                                     "instruction\n");
                        return false;
                }
        }
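
        /*
         * guest_regs stores the GPRs in push order, r15 first and rax last,
         * so GPR number 'reg' is found at index 15 - reg.
         */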
        val = cpu_data->guest_regs.by_index[15 - reg];

        vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
        /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
        bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
        if ((val ^ vmcb->cr0) & bits)
                vcpu_tlb_flush();
        /* TODO: better check for #GP reasons */
        vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
        if (val & X86_CR0_PG)
                update_efer(vmcb);
        vmcb->clean_bits &= ~CLEAN_BITS_CRX;

        return true;
}

static bool svm_handle_msr_write(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long efer;

        if (cpu_data->guest_regs.rcx == MSR_EFER) {
                /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
                efer = get_wrmsr_value(&cpu_data->guest_regs) | EFER_SVME;
                /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
                if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
                        vcpu_tlb_flush();
                vmcb->efer = efer;
                vmcb->clean_bits &= ~CLEAN_BITS_CRX;
                vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
                return true;
        }

        return vcpu_handle_msr_write();
}

/*
 * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
 * be treated separately in svm_handle_avic_access().
 */
static bool svm_handle_apic_access(struct vmcb *vmcb)
{
        struct guest_paging_structures pg_structs;
        unsigned int inst_len, offset;
        bool is_write;

        /* The caller is responsible for sanity checks */
        is_write = !!(vmcb->exitinfo1 & 0x2);
        offset = vmcb->exitinfo2 - XAPIC_BASE;

        if (offset & 0x00f)
                goto out_err;

        if (!vcpu_get_guest_paging_structs(&pg_structs))
                goto out_err;

        inst_len = apic_mmio_access(vmcb->rip, &pg_structs, offset >> 4,
                                    is_write);
        if (!inst_len)
                goto out_err;

        vcpu_skip_emulated_instruction(inst_len);
        return true;

out_err:
        panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
                     offset, is_write);
        return false;
}

static void dump_guest_regs(union registers *guest_regs, struct vmcb *vmcb)
{
        panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
                     vmcb->rsp, vmcb->rflags);
        panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
                     guest_regs->rbx, guest_regs->rcx);
        panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
                     guest_regs->rsi, guest_regs->rdi);
        panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
                     vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
                     !!(vmcb->efer & EFER_LMA));
        panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
                     vmcb->cr3, vmcb->cr4);
        panic_printk("EFER: %p\n", vmcb->efer);
}
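
/*
 * EXITINFO1 layout for IOIO intercepts (APMv2, Sect. 15.10.2): bit 0 =
 * IN, bit 2 = string instruction, bit 3 = REP prefix, bits 6:4 = one-hot
 * operand size, bits 31:16 = port number. EXITINFO2 holds the rIP of
 * the next instruction, which yields the instruction length.
 */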
void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *io)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;
        u64 exitinfo = vmcb->exitinfo1;

        /* parse exit info for I/O instructions (see APM, 15.10.2) */
        io->port = (exitinfo >> 16) & 0xFFFF;
        io->size = (exitinfo >> 4) & 0x7;
        io->in = !!(exitinfo & 0x1);
        io->inst_len = vmcb->exitinfo2 - vmcb->rip;
        io->rep_or_str = !!(exitinfo & 0x0c);
}

void vcpu_vendor_get_mmio_intercept(struct vcpu_mmio_intercept *mmio)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;

        mmio->phys_addr = vmcb->exitinfo2;
        mmio->is_write = !!(vmcb->exitinfo1 & 0x2);
}

void vcpu_handle_exit(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        bool res = false;

        vmcb->gs.base = read_msr(MSR_GS_BASE);

        /* Restore GS value expected by per_cpu data accessors */
        write_msr(MSR_GS_BASE, (unsigned long)cpu_data);

        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
        /*
         * All guest state is marked unmodified; individual handlers must
         * clear the bits as needed.
         */
        vmcb->clean_bits = 0xffffffff;

        switch (vmcb->exitcode) {
        case VMEXIT_INVALID:
                panic_printk("FATAL: VM-Entry failure, error %d\n",
                             vmcb->exitcode);
                break;
        case VMEXIT_NMI:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
                /* Temporarily enable GIF to consume pending NMI */
                asm volatile("stgi; clgi" : : : "memory");
                x86_check_events();
                goto vmentry;
        case VMEXIT_VMMCALL:
                vcpu_handle_hypercall();
                return;
        case VMEXIT_CR0_SEL_WRITE:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
                if (svm_handle_cr(cpu_data))
                        goto vmentry;
                break;
        case VMEXIT_CPUID:
                vcpu_handle_cpuid();
                goto vmentry;
        case VMEXIT_MSR:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
                if (!vmcb->exitinfo1)
                        res = vcpu_handle_msr_read();
                else
                        res = svm_handle_msr_write(cpu_data);
                if (res)
                        goto vmentry;
                break;
        case VMEXIT_NPF:
                if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
                    vmcb->exitinfo2 >= XAPIC_BASE &&
                    vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
                        /* APIC access in non-AVIC mode */
                        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
                        if (svm_handle_apic_access(vmcb))
                                goto vmentry;
                } else {
                        /* General MMIO (IOAPIC, PCI etc) */
                        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
                        if (vcpu_handle_mmio_access())
                                goto vmentry;
                }
                break;
        case VMEXIT_XSETBV:
                if (vcpu_handle_xsetbv())
                        goto vmentry;
                break;
        case VMEXIT_IOIO:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
                if (vcpu_handle_io_access())
                        goto vmentry;
                break;
        case VMEXIT_EXCEPTION_DB:
        case VMEXIT_EXCEPTION_AC:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_EXCEPTION]++;
                /* Reinject exception, including error code if needed. */
                vmcb->eventinj = (vmcb->exitcode - VMEXIT_EXCEPTION_DE) |
                        SVM_EVENTINJ_EXCEPTION | SVM_EVENTINJ_VALID;
                if (vmcb->exitcode == VMEXIT_EXCEPTION_AC) {
                        vmcb->eventinj |= SVM_EVENTINJ_ERR_VALID;
                        vmcb->eventinj_err = vmcb->exitinfo1;
                }
                goto vmentry;
        /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
        default:
                panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
                             "exitinfo1 %p exitinfo2 %p\n",
                             vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
        }
        dump_guest_regs(&cpu_data->guest_regs, vmcb);
        panic_park();

vmentry:
        write_msr(MSR_GS_BASE, vmcb->gs.base);
}

void vcpu_park(void)
{
        vcpu_vendor_reset(APIC_BSP_PSEUDO_SIPI);
        /* No need to clear VMCB Clean bit: vcpu_vendor_reset() already does
         * this. */
        this_cpu_data()->vmcb.n_cr3 = paging_hvirt2phys(parked_mode_npt);

        vcpu_tlb_flush();
}

void vcpu_nmi_handler(void)
{
}
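
/*
 * With flush-by-ASID support only this guest's TLB entries are dropped
 * on the next VMRUN; otherwise the entire TLB has to be flushed.
 */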
void vcpu_tlb_flush(void)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;

        if (has_flush_by_asid)
                vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
        else
                vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}
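
/*
 * With decode assists the CPU deposits up to 15 pre-fetched instruction
 * bytes in the VMCB on an intercept, so mapping the guest page can be
 * skipped whenever the requested bytes fall into that window.
 */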
const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
                              unsigned long pc, unsigned int *size)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;
        unsigned long start;

        if (has_assists && vmcb->bytes_fetched) {
                start = vmcb->rip - pc;
                if (start < vmcb->bytes_fetched) {
                        *size = vmcb->bytes_fetched - start;
                        return &vmcb->guest_bytes[start];
                }
        }

        return vcpu_map_inst(pg_structs, pc, size);
}

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
                                    struct vcpu_io_bitmap *iobm)
{
        iobm->data = cell->arch.svm.iopm;
        iobm->size = IOPM_PAGES * PAGE_SIZE;
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
        struct vmcb *vmcb = &this_cpu_data()->vmcb;

        x_state->efer = vmcb->efer;
        x_state->rflags = vmcb->rflags;
        x_state->cs = vmcb->cs.selector;
        x_state->rip = vmcb->rip;
}

/* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
void enable_irq(void)
{
        asm volatile("stgi; sti" : : : "memory");
}

/* Jailhouse runs with GIF cleared, so we need to restore this state */
void disable_irq(void)
{
        asm volatile("cli; clgi" : : : "memory");
}