/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>

#include <asm/apic.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * NW bit is ignored by all modern processors, however some
 * combinations of NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_CLEARED_BITS	~X86_CR0_NW
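
/*
 * Per APMv2, Sect. 15.5, CD=0 together with NW=1 is the illegal
 * combination that makes VMRUN fail with invalid guest state; with NW
 * forced to 0, every reachable CD value stays legal.
 */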

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
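
/*
 * MSR permission map (APMv2, Sect. 15.11): two bits per MSR, the
 * low-order bit of each pair intercepting reads, the high-order bit
 * intercepting writes, so each byte below covers four MSRs - hence
 * the index/4 arithmetic. A set bit means "intercept".
 */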
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
	[ SVM_MSRPM_0000 ] = {
		[      0/4 ...  0x017/4 ] = 0,
		[  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
		[  0x01c/4 ...  0x7ff/4 ] = 0,
		/* x2APIC MSRs - emulated if not present */
		[  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (w) */
		[  0x804/4 ...  0x807/4 ] = 0,
		[  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
		[  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
		[  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
		[  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
		[  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
		[  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
		[  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
		[  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
		[  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
		[  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
		[  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
		[  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
		[  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
		[  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
		[  0x840/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_C000 ] = {
		[      0/4 ...  0x07f/4 ] = 0,
		[  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
		[  0x084/4 ... 0x1fff/4 ] = 0
	},
	[ SVM_MSRPM_C001 ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_RESV ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	}
};

static void *avic_page;

static int svm_check_features(void)
{
	/* SVM is available */
	if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
		return -ENODEV;

	/* Nested paging */
	if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
		return -EIO;

	/* Decode assists */
	if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
		has_assists = true;

	/* AVIC support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
		has_avic = true;

	/* TLB Flush by ASID support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
		has_flush_by_asid = true;

	return 0;
}
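
/*
 * The feature flags cached above are consumed elsewhere in this file:
 * has_assists in vcpu_get_inst_bytes(), has_avic in vcpu_vendor_init()
 * and vcpu_vendor_cell_init(), has_flush_by_asid in vcpu_tlb_flush().
 */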

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
				     const struct desc_table_reg *dtr)
{
	struct svm_segment tmp = { 0 };

	if (dtr) {
		tmp.base = dtr->base;
		tmp.limit = dtr->limit & 0xffff;
	}

	*svm_segment = tmp;
}

/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
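/*
 * The conversion below assumes the VMX-style layout of
 * segment->access_rights: a 17-bit field whose bit 16 ("unusable",
 * 0x10000) has no SVM equivalent and whose flag bits 12-15
 * (AVL/L/D.B/G) move down to bits 8-11 of SVM's packed 12-bit
 * attribute format.
 */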
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
					 const struct segment *segment)
{
	u32 ar;

	svm_segment->selector = segment->selector;

	if (segment->access_rights == 0x10000) {
		svm_segment->access_rights = 0;
	} else {
		ar = segment->access_rights;
		svm_segment->access_rights =
			((ar & 0xf000) >> 4) | (ar & 0x00ff);
	}

	svm_segment->limit = segment->limit;
	svm_segment->base = segment->base;
}

static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
	/* No real need for this function; used for consistency with vmx.c */
	vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
	vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

	return true;
}

static int vmcb_setup(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	memset(vmcb, 0, sizeof(struct vmcb));

	vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
	vmcb->cr3 = cpu_data->linux_cr3;
	vmcb->cr4 = read_cr4();

	set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
	set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
	set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
	set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
	set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
	set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
	set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

	set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
	set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
	set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

	vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

	vmcb->rflags = 0x02;
	vmcb->rsp = cpu_data->linux_sp +
		(NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
	vmcb->rip = cpu_data->linux_ip;

	vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
	vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
	vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
	vmcb->star = read_msr(MSR_STAR);
	vmcb->lstar = read_msr(MSR_LSTAR);
	vmcb->cstar = read_msr(MSR_CSTAR);
	vmcb->sfmask = read_msr(MSR_SFMASK);
	vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

	vmcb->dr6 = 0x00000ff0;
	vmcb->dr7 = 0x00000400;

	/* Make the hypervisor visible */
	vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

	/* Linux uses custom PAT setting */
	vmcb->g_pat = read_msr(MSR_IA32_PAT);

	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
	/* TODO: Do we need this for SVM? */
	/* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

	vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

	vmcb->np_enable = 1;
	/* No more than one guest owns the CPU */
	vmcb->guest_asid = 1;
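
	/*
	 * Note: VMRUN rejects a guest ASID of 0 (APMv2, Sect. 15.5), as
	 * ASID 0 belongs to the host; 1 is thus the lowest usable value.
	 */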

	/* TODO: Setup AVIC */

	return vcpu_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
				     unsigned long gphys,
				     unsigned long flags)
{
	return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
				gphys, flags);
}

static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
	/* See APMv2, Section 15.25.5 */
	*pte = (next_pt & 0x000ffffffffff000UL) |
		(PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}
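
/*
 * Nested page table walks are treated as user-mode accesses (APMv2,
 * Sect. 15.25.5), which is why the entry above and the mappings
 * created in vcpu_map_memory_region() always carry PAGE_FLAG_US.
 */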

int vcpu_vendor_init(void)
{
	unsigned long vm_cr;
	int err, n;

	err = svm_check_features();
	if (err)
		return err;

	vm_cr = read_msr(MSR_VM_CR);
	if (vm_cr & VM_CR_SVMDIS)
		/* SVM disabled in BIOS */
		return -EPERM;

	/* Nested paging is the same as the native one */
	memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
	for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
		npt_paging[n].set_next_pt = npt_set_next_pt;

	/* This is always false for AMD now (except in nested SVM);
	   see Sect. 16.3.1 in APMv2 */
	if (using_x2apic) {
		/* allow direct x2APIC access except for ICR writes */
		memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
		       (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
		msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
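		/*
		 * 0x02 sets only the write-intercept bit of the pair
		 * assigned to the ICR (MSR 0x830), so ICR reads still
		 * go straight to the hardware.
		 */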
	} else {
		/* Enable Extended Interrupt LVT */
		apic_reserved_bits[0x50] = 0;
		if (has_avic) {
			avic_page = page_alloc(&remap_pool, 1);
			if (!avic_page)
				return -ENOMEM;
		}
	}

	return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
	u64 flags;
	int err;

	/* allocate iopm (two 4-K pages + 3 bits) */
	cell->svm.iopm = page_alloc(&mem_pool, 3);
	if (!cell->svm.iopm)
		return -ENOMEM;

	/* build root NPT of cell */
	cell->svm.npt_structs.root_paging = npt_paging;
	cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->svm.npt_structs.root_table)
		return -ENOMEM;

	if (!has_avic) {
		/*
		 * Map xAPIC as is; reads are passed, writes are trapped.
		 */
		flags = PAGE_READONLY_FLAGS |
			PAGE_FLAG_US |
			PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	} else {
		flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs,
				    paging_hvirt2phys(avic_page),
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	}

	return err;
}

int vcpu_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	u64 phys_start = mem->phys_start;
	u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= PAGE_FLAG_PRESENT;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= PAGE_FLAG_RW;
	if (mem->flags & JAILHOUSE_MEM_EXECUTE)
		flags |= PAGE_FLAG_EXECUTE;
	if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
		phys_start = paging_hvirt2phys(&cell->comm_page);

	return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
			     mem->virt_start, flags, PAGING_NON_COHERENT);
}
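
/*
 * Note: NPT permissions can only restrict access; what the guest
 * finally gets is the intersection of its own page table flags and
 * the NPT flags assembled above.
 */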

int vcpu_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
			      mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
	paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
		       PAGING_NON_COHERENT);
	page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

int vcpu_init(struct per_cpu *cpu_data)
{
	unsigned long efer;
	int err;

	err = svm_check_features();
	if (err)
		return err;

	efer = read_msr(MSR_EFER);
	if (efer & EFER_SVME)
		return -EBUSY;

	efer |= EFER_SVME;
	write_msr(MSR_EFER, efer);

	cpu_data->svm_state = SVMON;

	if (!vmcb_setup(cpu_data))
		return -EIO;

	write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

	/* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
	if (!using_x2apic)
		apic_reserved_bits[0x50] = 0;

	return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
	unsigned long efer;

	if (cpu_data->svm_state == SVMOFF)
		return;

	cpu_data->svm_state = SVMOFF;

	efer = read_msr(MSR_EFER);
	efer &= ~EFER_SVME;
	write_msr(MSR_EFER, efer);

	write_msr(MSR_VM_HSAVE_PA, 0);
}

void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long val;
	bool ok = true;

	vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
	vmcb->cr3 = 0;
	vmcb->cr4 = 0;

	vmcb->rflags = 0x02;

	val = 0;
	if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
		val = 0xfff0;
		sipi_vector = 0xf0;
	}
	vmcb->rip = val;
	vmcb->rsp = 0;

	vmcb->cs.selector = sipi_vector << 8;
	vmcb->cs.base = sipi_vector << 12;
	vmcb->cs.limit = 0xffff;
	vmcb->cs.access_rights = 0x009b;

	vmcb->ds.selector = 0;
	vmcb->ds.base = 0;
	vmcb->ds.limit = 0xffff;
	vmcb->ds.access_rights = 0x0093;

	vmcb->es.selector = 0;
	vmcb->es.base = 0;
	vmcb->es.limit = 0xffff;
	vmcb->es.access_rights = 0x0093;

	vmcb->fs.selector = 0;
	vmcb->fs.base = 0;
	vmcb->fs.limit = 0xffff;
	vmcb->fs.access_rights = 0x0093;

	vmcb->gs.selector = 0;
	vmcb->gs.base = 0;
	vmcb->gs.limit = 0xffff;
	vmcb->gs.access_rights = 0x0093;

	vmcb->ss.selector = 0;
	vmcb->ss.base = 0;
	vmcb->ss.limit = 0xffff;
	vmcb->ss.access_rights = 0x0093;

	vmcb->tr.selector = 0;
	vmcb->tr.base = 0;
	vmcb->tr.limit = 0xffff;
	vmcb->tr.access_rights = 0x008b;

	vmcb->ldtr.selector = 0;
	vmcb->ldtr.base = 0;
	vmcb->ldtr.limit = 0xffff;
	vmcb->ldtr.access_rights = 0x0082;

	vmcb->gdtr.selector = 0;
	vmcb->gdtr.base = 0;
	vmcb->gdtr.limit = 0xffff;
	vmcb->gdtr.access_rights = 0;

	vmcb->idtr.selector = 0;
	vmcb->idtr.base = 0;
	vmcb->idtr.limit = 0xffff;
	vmcb->idtr.access_rights = 0;
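
	/*
	 * The attribute values above, in SVM's packed format: 0x009b is
	 * a present, accessed code segment, 0x0093 a present, accessed
	 * read/write data segment, 0x008b a present busy TSS and 0x0082
	 * a present LDT.
	 */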

	vmcb->efer = EFER_SVME;

	/* These MSRs are undefined on reset */
	vmcb->star = 0;
	vmcb->lstar = 0;
	vmcb->cstar = 0;
	vmcb->sfmask = 0;
	vmcb->sysenter_cs = 0;
	vmcb->sysenter_eip = 0;
	vmcb->sysenter_esp = 0;
	vmcb->kerngsbase = 0;

	vmcb->g_pat = 0x0007040600070406;

	vmcb->dr7 = 0x00000400;

	ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);

	/* This is always false, but to be consistent with vmx.c... */
	if (!ok) {
		panic_printk("FATAL: CPU reset failed\n");
		panic_stop(cpu_data);
	}
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	vmcb->rip += inst_len;
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (vmcb->efer & EFER_LMA) {
		pg_structs->root_paging = x86_64_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0x000ffffffffff000UL;
	} else if ((vmcb->cr0 & X86_CR0_PG) &&
		   !(vmcb->cr4 & X86_CR4_PAE)) {
		pg_structs->root_paging = i386_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0xfffff000UL;
	} else if (!(vmcb->cr0 & X86_CR0_PG)) {
		/*
		 * Can be in non-paged protected mode as well, but
		 * the translation mechanism will stay the same anyway.
		 */
		pg_structs->root_paging = realmode_paging;
		/*
		 * This will make paging_get_guest_pages map the page
		 * that also contains the bootstrap code and, thus, is
		 * always present in a cell.
		 */
		pg_structs->root_table_gphys = 0xff000;
	} else {
		/* Non-long-mode PAE paging is not supported */
		printk("FATAL: Unsupported paging mode\n");
		return false;
	}

	return true;
}

static bool svm_handle_msr_read(struct registers *guest_regs,
				struct per_cpu *cpu_data)
{
	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
		x2apic_handle_read(guest_regs);
		return true;
	} else {
		panic_printk("FATAL: Unhandled MSR read: %x\n",
			     guest_regs->rcx);
		return false;
	}
}

static bool svm_handle_msr_write(struct registers *guest_regs,
				 struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer;
	bool result = true;

	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		result = x2apic_handle_write(guest_regs, cpu_data);
		goto out;
	}
	if (guest_regs->rcx == MSR_EFER) {
		/* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
		efer = (guest_regs->rax & 0xffffffff) |
			(guest_regs->rdx << 32) | EFER_SVME;
		/* Flush TLB on LME/NXE change: see APMv2, Sect. 15.16 */
		if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
			vcpu_tlb_flush();
		vmcb->efer = efer;
		goto out;
	}

	panic_printk("FATAL: Unhandled MSR write: %x\n",
		     guest_regs->rcx);
	result = false;
out:
	vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
	return result;
}

/*
 * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
 * be treated separately in svm_handle_avic_access().
 */
static bool svm_handle_apic_access(struct registers *guest_regs,
				   struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct guest_paging_structures pg_structs;
	unsigned int inst_len, offset;
	bool is_write;

	/* The caller is responsible for sanity checks */
	is_write = !!(vmcb->exitinfo1 & 0x2);
	offset = vmcb->exitinfo2 - XAPIC_BASE;

	if (offset & 0x00f)
		goto out_err;

	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto out_err;

	inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
				    &pg_structs, offset >> 4, is_write);
	if (!inst_len)
		goto out_err;

	vcpu_skip_emulated_instruction(inst_len);
	return true;

out_err:
	panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
		     offset, is_write);
	return false;
}

static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
{
	panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
		     vmcb->rsp, vmcb->rflags);
	panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
		     guest_regs->rbx, guest_regs->rcx);
	panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
		     guest_regs->rsi, guest_regs->rdi);
	panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
		     vmcb->cs.selector, vmcb->cs.base,
		     vmcb->cs.access_rights,
		     !!(vmcb->efer & EFER_LMA));
	panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
		     vmcb->cr3, vmcb->cr4);
	panic_printk("EFER: %p\n", vmcb->efer);
}

static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
					 struct vcpu_pf_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	out->phys_addr = vmcb->exitinfo2;
	out->is_write = !!(vmcb->exitinfo1 & 0x2);
}

static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
					 struct vcpu_io_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	u64 exitinfo = vmcb->exitinfo1;

	/* parse exit info for I/O instructions (see APM, 15.10.2) */
	out->port = (exitinfo >> 16) & 0xFFFF;
	out->size = (exitinfo >> 4) & 0x7;
	out->in = !!(exitinfo & 0x1);
	out->inst_len = vmcb->exitinfo2 - vmcb->rip;
	out->rep_or_str = !!(exitinfo & 0x0c);
}
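
/*
 * EXITINFO1 layout for IOIO intercepts (APMv2, Sect. 15.10.2): bit 0 =
 * direction (1 = IN), bit 2 = string instruction, bit 3 = REP prefix,
 * bits 6:4 = one-hot operand size (1/2/4 bytes), bits 31:16 = port
 * number. EXITINFO2 holds the rIP of the following instruction, which
 * yields inst_len above.
 */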

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct vcpu_execution_state x_state;
	struct vcpu_pf_intercept pf;
	struct vcpu_io_intercept io;
	bool res = false;
	int sipi_vector;

	/* Restore GS value expected by per_cpu data accessors */
	write_msr(MSR_GS_BASE, (unsigned long)cpu_data);

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

	switch (vmcb->exitcode) {
	case VMEXIT_INVALID:
		panic_printk("FATAL: VM-Entry failure, error %d\n",
			     vmcb->exitcode);
		break;
	case VMEXIT_NMI:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
		/* Temporarily enable GIF to consume pending NMI */
		asm volatile("stgi; clgi" : : : "memory");
		sipi_vector = x86_handle_events(cpu_data);
		if (sipi_vector >= 0) {
			printk("CPU %d received SIPI, vector %x\n",
			       cpu_data->cpu_id, sipi_vector);
			vcpu_reset(cpu_data, sipi_vector);
			memset(guest_regs, 0, sizeof(*guest_regs));
		}
		iommu_check_pending_faults(cpu_data);
		return;
	case VMEXIT_CPUID:
		/* FIXME: We are not intercepting CPUID now */
		return;
	case VMEXIT_VMMCALL:
		vcpu_vendor_get_execution_state(&x_state);
		vcpu_handle_hypercall(guest_regs, &x_state);
		return;
	case VMEXIT_MSR:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
		if (!vmcb->exitinfo1)
			res = svm_handle_msr_read(guest_regs, cpu_data);
		else
			res = svm_handle_msr_write(guest_regs, cpu_data);
		if (res)
			return;
		break;
	case VMEXIT_NPF:
		/* exitinfo1 bits 0-2 set: present+write+user fault */
		if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
		    vmcb->exitinfo2 >= XAPIC_BASE &&
		    vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
			/* APIC access in non-AVIC mode */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
			if (svm_handle_apic_access(guest_regs, cpu_data))
				return;
		} else {
			/* General MMIO (IOAPIC, PCI etc) */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
			vcpu_vendor_get_pf_intercept(cpu_data, &pf);
			if (vcpu_handle_pt_violation(guest_regs, &pf))
				return;
		}

		panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
			     "error code is %x\n", vmcb->exitinfo2,
			     vmcb->exitinfo1 & 0xf);
		break;
	case VMEXIT_IOIO:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
		vcpu_vendor_get_io_intercept(cpu_data, &io);
		if (vcpu_handle_io_access(guest_regs, &io))
			return;
		break;
	/* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
	default:
		panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
			     "exitinfo1 %p exitinfo2 %p\n",
			     vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
	}
	dump_guest_regs(guest_regs, vmcb);
	panic_halt(cpu_data);
}

void vcpu_park(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
}

void vcpu_nmi_handler(struct per_cpu *cpu_data)
{
	printk("Consuming pending NMI on CPU %d\n", cpu_data->cpu_id);
}

void vcpu_tlb_flush(void)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (has_flush_by_asid)
		vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
	else
		vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}
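
/*
 * SVM_TLB_FLUSH_GUEST restricts the flush triggered at the next VMRUN
 * to the guest's ASID, keeping host translations intact; processors
 * without flush-by-ASID can only dump the entire TLB.
 */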

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
			      unsigned long pc, unsigned int *size)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long start;

	if (has_assists) {
		if (!*size)
			return NULL;
		start = vmcb->rip - pc;
		if (start < vmcb->bytes_fetched) {
			*size = vmcb->bytes_fetched - start;
			return &vmcb->guest_bytes[start];
		} else {
			return NULL;
		}
	} else {
		return vcpu_map_inst(pg_structs, pc, size);
	}
}
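
/*
 * With decode assists, the processor stores up to 15 bytes of the
 * intercepted instruction in the VMCB, saving us the guest page-table
 * walk that vcpu_map_inst() would otherwise have to perform.
 */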

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
				    struct vcpu_io_bitmap *iobm)
{
	iobm->data = cell->svm.iopm;
	/* iopm is a pointer, so report the allocated size (3 pages),
	   not sizeof the pointer itself */
	iobm->size = 3 * PAGE_SIZE;
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
	struct per_cpu *cpu_data = this_cpu_data();

	x_state->efer = cpu_data->vmcb.efer;
	x_state->rflags = cpu_data->vmcb.rflags;
	x_state->cs = cpu_data->vmcb.cs.selector;
	x_state->rip = cpu_data->vmcb.rip;
}