/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>

#include <asm/apic.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * NW bit is ignored by all modern processors, however some
 * combinations of NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_CLEARED_BITS	~X86_CR0_NW

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
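
/*
 * MSR permission map: one 2-KB block per MSR range (0x0000_0000-0x0000_1FFF,
 * 0xC000_0000-0xC000_1FFF, 0xC001_0000-0xC001_1FFF, plus a reserved block).
 * Each MSR is covered by two consecutive bits, the even one intercepting
 * reads, the odd one writes; a set bit triggers a #VMEXIT. The per-byte
 * annotations below spell out which accesses are intercepted.
 */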
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
	[ SVM_MSRPM_0000 ] = {
		[      0/4 ...  0x017/4 ] = 0,
		[  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
		[  0x01c/4 ...  0x7ff/4 ] = 0,
		/* x2APIC MSRs - emulated if not present */
		[  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (w) */
		[  0x804/4 ...  0x807/4 ] = 0,
		[  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
		[  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
		[  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
		[  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
		[  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
		[  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
		[  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
		[  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
		[  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
		[  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
		[  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
		[  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
		[  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
		[  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
		[  0x840/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_C000 ] = {
		[      0/4 ...  0x07f/4 ] = 0,
		[  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
		[  0x084/4 ... 0x1fff/4 ] = 0
	},
	[ SVM_MSRPM_C001 ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_RESV ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	}
};
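
/*
 * Page backing the guest's xAPIC MMIO window when AVIC is available;
 * allocated from the remap pool in vcpu_vendor_init().
 */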
static void *avic_page;

static int svm_check_features(void)
{
	/* SVM is available */
	if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
		return -ENODEV;

	/* Nested paging */
	if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
		return -EIO;

	/* Decode assists */
	if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
		has_assists = true;

	/* AVIC support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
		has_avic = true;

	/* TLB Flush by ASID support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
		has_flush_by_asid = true;

	return 0;
}

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
				     const struct desc_table_reg *dtr)
{
	struct svm_segment tmp = { 0 };

	if (dtr) {
		tmp.base = dtr->base;
		tmp.limit = dtr->limit & 0xffff;
	}

	*svm_segment = tmp;
}

/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
					 const struct segment *segment)
{
	u32 ar;

	svm_segment->selector = segment->selector;
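
	/*
	 * The VMCB stores segment attributes as a packed 12-bit value:
	 * type/S/DPL/P stay in bits 0-7, while AVL/L/D/G move from bits
	 * 12-15 down to bits 8-11. 0x10000 is the VMX "unusable" flag,
	 * which SVM expresses as an all-zero attribute instead.
	 */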
	if (segment->access_rights == 0x10000) {
		svm_segment->access_rights = 0;
	} else {
		ar = segment->access_rights;
		svm_segment->access_rights =
			((ar & 0xf000) >> 4) | (ar & 0x00ff);
	}

	svm_segment->limit = segment->limit;
	svm_segment->base = segment->base;
}

static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
	/* No real need for this function; used for consistency with vmx.c */
	vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
	vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

	return true;
}

static int vmcb_setup(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	memset(vmcb, 0, sizeof(struct vmcb));

	vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
	vmcb->cr3 = cpu_data->linux_cr3;
	vmcb->cr4 = read_cr4();

	set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
	set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
	set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
	set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
	set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
	set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
	set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

	set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
	set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
	set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

	vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
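
	/*
	 * RFLAGS bit 1 is architecturally fixed to one. The stack pointer
	 * skips the NUM_ENTRY_REGS general-purpose registers plus the
	 * return address that the entry path pushed onto the Linux stack.
	 */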
	vmcb->rflags = 0x02;
	vmcb->rsp = cpu_data->linux_sp +
		    (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
	vmcb->rip = cpu_data->linux_ip;

	vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
	vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
	vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
	vmcb->star = read_msr(MSR_STAR);
	vmcb->lstar = read_msr(MSR_LSTAR);
	vmcb->cstar = read_msr(MSR_CSTAR);
	vmcb->sfmask = read_msr(MSR_SFMASK);
	vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
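
	/*
	 * Benign reset state for the debug registers: DR6 carries only its
	 * read-as-one bits (4-11), DR7 only reserved bit 10, i.e. no debug
	 * condition is flagged and no hardware breakpoints are armed.
	 */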
	vmcb->dr6 = 0x00000ff0;
	vmcb->dr7 = 0x00000400;

	/* Make the hypervisor visible */
	vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

	/* Linux uses custom PAT setting */
	vmcb->g_pat = read_msr(MSR_IA32_PAT);

	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
	/* TODO: Do we need this for SVM ? */
	/* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

	vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

	vmcb->np_enable = 1;
	/* No more than one guest owns the CPU */
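	/* ASID 0 is reserved for the host */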
	vmcb->guest_asid = 1;

	/* TODO: Setup AVIC */

	return vcpu_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
				     unsigned long gphys,
				     unsigned long flags)
{
	return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
				gphys, flags);
}

static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
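	/*
	 * Nested page table walks treat all guest accesses as user-mode
	 * accesses, hence the US bit must be set in every NPT entry.
	 */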
	/* See APMv2, Section 15.25.5 */
	*pte = (next_pt & 0x000ffffffffff000UL) |
		(PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}

int vcpu_vendor_init(void)
{
	unsigned long vm_cr;
	int err, n;

	err = svm_check_features();
	if (err)
		return err;

	vm_cr = read_msr(MSR_VM_CR);
	if (vm_cr & VM_CR_SVMDIS)
		/* SVM disabled in BIOS */
		return -EPERM;

	/* Nested paging is the same as the native one */
	memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
	for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
		npt_paging[n].set_next_pt = npt_set_next_pt;

	/* This is always false for AMD now (except in nested SVM);
	   see Sect. 16.3.1 in APMv2 */
	if (using_x2apic) {
		/* allow direct x2APIC access except for ICR writes */
		memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
				(MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
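		/*
		 * Keep the write intercept for the ICR (0x02 sets the
		 * write bit) so IPI destinations stay under hypervisor
		 * control.
		 */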
		msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
	} else {
		/* Enable Extended Interrupt LVT */
		apic_reserved_bits[0x50] = 0;
		if (has_avic) {
			avic_page = page_alloc(&remap_pool, 1);
			if (!avic_page)
				return -ENOMEM;
		}
	}

	return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
	u64 flags;
	int err;

	/* allocate iopm (two 4-K pages + 3 bits) */
	cell->svm.iopm = page_alloc(&mem_pool, 3);
	if (!cell->svm.iopm)
		return -ENOMEM;

	/* build root NPT of cell */
	cell->svm.npt_structs.root_paging = npt_paging;
	cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->svm.npt_structs.root_table)
		return -ENOMEM;

	if (!has_avic) {
		/*
		 * Map xAPIC as is; reads are passed, writes are trapped.
		 */
		flags = PAGE_READONLY_FLAGS |
			PAGE_FLAG_US |
			PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	} else {
		flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
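		/*
		 * With AVIC, the xAPIC window is instead backed by the
		 * dedicated avic_page, so plain guest reads and writes
		 * land there without faulting.
		 */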
		err = paging_create(&cell->svm.npt_structs,
				    paging_hvirt2phys(avic_page),
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	}

	return err;
}

int vcpu_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	u64 phys_start = mem->phys_start;
	u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= PAGE_FLAG_PRESENT;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= PAGE_FLAG_RW;
	if (mem->flags & JAILHOUSE_MEM_EXECUTE)
		flags |= PAGE_FLAG_EXECUTE;
	if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
		phys_start = paging_hvirt2phys(&cell->comm_page);

	return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
			     mem->virt_start, flags, PAGING_NON_COHERENT);
}

int vcpu_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
			      mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
	paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
		       PAGING_NON_COHERENT);
	page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

int vcpu_init(struct per_cpu *cpu_data)
{
	unsigned long efer;
	int err;

	err = svm_check_features();
	if (err)
		return err;

	efer = read_msr(MSR_EFER);
	if (efer & EFER_SVME)
		return -EBUSY;

	efer |= EFER_SVME;
	write_msr(MSR_EFER, efer);

	cpu_data->svm_state = SVMON;

	if (!vmcb_setup(cpu_data))
		return -EIO;
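
	/*
	 * VM_HSAVE_PA tells the processor where to save host state on
	 * VMRUN and where to reload it from on #VMEXIT.
	 */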
	write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

	/* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
	if (!using_x2apic)
		apic_reserved_bits[0x50] = 0;

	return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
	unsigned long efer;

	if (cpu_data->svm_state == SVMOFF)
		return;

	cpu_data->svm_state = SVMOFF;

	efer = read_msr(MSR_EFER);
	efer &= ~EFER_SVME;
	write_msr(MSR_EFER, efer);

	write_msr(MSR_VM_HSAVE_PA, 0);
}

void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
	/* TODO: Implement */
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (vmcb->efer & EFER_LMA) {
		pg_structs->root_paging = x86_64_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0x000ffffffffff000UL;
	} else if ((vmcb->cr0 & X86_CR0_PG) &&
		   !(vmcb->cr4 & X86_CR4_PAE)) {
		pg_structs->root_paging = i386_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0xfffff000UL;
	} else if (!(vmcb->cr0 & X86_CR0_PG)) {
		/*
		 * Can be in non-paged protected mode as well, but
		 * the translation mechanism will stay the same anyway.
		 */
		pg_structs->root_paging = realmode_paging;
		/*
		 * This will make paging_get_guest_pages map the page
		 * that also contains the bootstrap code and, thus, is
		 * always present in a cell.
		 */
		pg_structs->root_table_gphys = 0xff000;
	} else {
		printk("FATAL: Unsupported paging mode\n");
		return false;
	}

	return true;
}

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
	/* TODO: Implement */
}

void vcpu_park(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
}

void vcpu_nmi_handler(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
}

void vcpu_tlb_flush(void)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;
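
	/*
	 * tlb_control is consumed by the next VMRUN: with flush-by-ASID
	 * support only this guest's TLB entries are dropped, otherwise
	 * the entire TLB has to be flushed.
	 */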
	if (has_flush_by_asid)
		vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
	else
		vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
			      unsigned long pc, unsigned int *size)
{
	/* TODO: Implement */
	return NULL;
}

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
				    struct vcpu_io_bitmap *iobm)
{
	iobm->data = cell->svm.iopm;
	/*
	 * iopm is a pointer, so sizeof() would report the pointer size;
	 * return the size of the three pages allocated in
	 * vcpu_vendor_cell_init() instead.
	 */
	iobm->size = 3 * PAGE_SIZE;
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
	/* TODO: Implement */
}