1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  * Copyright (c) Valentine Sinitsyn, 2014
6  *
7  * Authors:
8  *  Jan Kiszka <jan.kiszka@siemens.com>
9  *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
10  *
11  * Based on vmx.c written by Jan Kiszka.
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  */
16
17 #include <jailhouse/entry.h>
18 #include <jailhouse/cell.h>
19 #include <jailhouse/cell-config.h>
20 #include <jailhouse/control.h>
21 #include <jailhouse/paging.h>
22 #include <jailhouse/printk.h>
23 #include <jailhouse/processor.h>
24 #include <jailhouse/string.h>
25 #include <jailhouse/utils.h>
26 #include <asm/apic.h>
27 #include <asm/control.h>
28 #include <asm/iommu.h>
29 #include <asm/paging.h>
30 #include <asm/percpu.h>
31 #include <asm/processor.h>
32 #include <asm/svm.h>
33 #include <asm/vcpu.h>
34
35 /*
36  * The NW bit is ignored by all modern processors; however, some
37  * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
38  * Sect. 15.5). To handle this, we always keep the NW bit off.
39  */
40 #define SVM_CR0_ALLOWED_BITS    (~X86_CR0_NW)
41
42 /* IOPM size: two 4-K pages + 3 bits, rounded up to three full pages */
43 #define IOPM_PAGES              3
44
45 static bool has_avic, has_assists, has_flush_by_asid;
46
47 static const struct segment invalid_seg;
48
49 static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
50
51 /* bit cleared: direct access allowed */
52 /* TODO: convert to whitelist */
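/*
 * MSRPM encoding (see the MSR intercept description in APMv2): each MSR is
 * assigned two consecutive bits, the even bit intercepting reads and the odd
 * bit intercepting writes, so one byte covers four MSRs - hence the /4
 * indexing and the (r)/(w) annotations below. Example: 0xaa over
 * 0x200/4 ... 0x273/4 sets only the write bits, trapping writes to MSRs
 * 0x200 - 0x273 while letting reads through.
 */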
53 static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
54         [ SVM_MSRPM_0000 ] = {
55                 [      0/4 ...  0x017/4 ] = 0,
56                 [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
57                 [  0x01c/4 ...  0x1ff/4 ] = 0,
58                 [  0x200/4 ...  0x273/4 ] = 0xaa, /* 0x200 - 0x273 (w) */
59                 [  0x274/4 ...  0x277/4 ] = 0xea, /* 0x274 - 0x276 (w), 0x277 (rw) */
60                 [  0x278/4 ...  0x2fb/4 ] = 0,
61                 [  0x2fc/4 ...  0x2ff/4 ] = 0x80, /* 0x2ff (w) */
62                 [  0x300/4 ...  0x7ff/4 ] = 0,
63                 /* x2APIC MSRs - emulated if not present */
64                 [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
65                 [  0x804/4 ...  0x807/4 ] = 0,
66                 [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
67                 [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
68                 [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
69                 [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
70                 [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
71                 [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
72                 [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
73                 [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
74                 [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
75                 [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
76                 [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
77                 [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
78                 [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
79                 [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
80                 [  0x840/4 ... 0x1fff/4 ] = 0,
81         },
82         [ SVM_MSRPM_C000 ] = {
83                 [      0/4 ...  0x07f/4 ] = 0,
84                 [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
85                 [  0x084/4 ... 0x1fff/4 ] = 0
86         },
87         [ SVM_MSRPM_C001 ] = {
88                 [      0/4 ... 0x1fff/4 ] = 0,
89         },
90         [ SVM_MSRPM_RESV ] = {
91                 [      0/4 ... 0x1fff/4 ] = 0,
92         }
93 };
94
95 /* This page is mapped so the code begins at 0x000ffff0 */
96 static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
97         [0xff0] = 0xfa, /* 1: cli */
98         [0xff1] = 0xf4, /*    hlt */
99         [0xff2] = 0xeb,
100         [0xff3] = 0xfc  /*    jmp 1b */
101 };
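/*
 * vcpu_vendor_init() maps this page at guest-physical 0x000ff000, and
 * vcpu_park() resets a parked CPU to CS:IP = f000:fff0 (linear 0xffff0),
 * i.e. directly onto the cli/hlt loop above.
 */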
102
103 static void *parked_mode_npt;
104
105 static void *avic_page;
106
107 static int svm_check_features(void)
108 {
109         /* SVM is available */
110         if (!(cpuid_ecx(0x80000001, 0) & X86_FEATURE_SVM))
111                 return trace_error(-ENODEV);
112
113         /* Nested paging */
114         if (!(cpuid_edx(0x8000000A, 0) & X86_FEATURE_NP))
115                 return trace_error(-EIO);
116
117         /* Decode assists */
118         if ((cpuid_edx(0x8000000A, 0) & X86_FEATURE_DECODE_ASSISTS))
119                 has_assists = true;
120
121         /* AVIC support */
122         /* FIXME: Jailhouse support is incomplete so far
123         if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_AVIC)
124                 has_avic = true; */
125
126         /* TLB Flush by ASID support */
127         if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_FLUSH_BY_ASID)
128                 has_flush_by_asid = true;
129
130         return 0;
131 }
132
133 static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
134                                      const struct desc_table_reg *dtr)
135 {
136         svm_segment->base = dtr->base;
137         svm_segment->limit = dtr->limit & 0xffff;
138 }
139
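/*
 * The VMCB stores segment attributes in a packed 12-bit format: the low byte
 * (type, S, DPL, P) is kept as is, while the flags (AVL, L, D/B, G), held at
 * bits 15:12 of struct segment's access_rights, move down to bits 11:8 -
 * hence the shift below.
 */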
140 static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
141                                          const struct segment *segment)
142 {
143         svm_segment->selector = segment->selector;
144         svm_segment->access_rights = ((segment->access_rights & 0xf000) >> 4) |
145                 (segment->access_rights & 0x00ff);
146         svm_segment->limit = segment->limit;
147         svm_segment->base = segment->base;
148 }
149
150 static void svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
151 {
152         vmcb->iopm_base_pa = paging_hvirt2phys(cell->arch.svm.iopm);
153         vmcb->n_cr3 = paging_hvirt2phys(cell->arch.svm.npt_structs.root_table);
154 }
155
156 static void vmcb_setup(struct per_cpu *cpu_data)
157 {
158         struct vmcb *vmcb = &cpu_data->vmcb;
159
160         memset(vmcb, 0, sizeof(struct vmcb));
161
162         vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
163         vmcb->cr3 = cpu_data->linux_cr3;
164         vmcb->cr4 = cpu_data->linux_cr4;
165
166         set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
167         set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
168         set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
169         set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
170         set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
171         set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
172         set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
173         set_svm_segment_from_segment(&vmcb->ldtr, &invalid_seg);
174
175         set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
176         set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
177
178         vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
179
180         vmcb->rflags = 0x02;
181         /* Indicate success to the caller of arch_entry */
182         vmcb->rax = 0;
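        /*
         * Linux resumes as if arch_entry had just returned: step over the
         * NUM_ENTRY_REGS callee-saved registers and the return address that
         * arch_entry left on the Linux stack.
         */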
183         vmcb->rsp = cpu_data->linux_sp +
184                 (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
185         vmcb->rip = cpu_data->linux_ip;
186
187         vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
188         vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
189         vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
190         vmcb->star = read_msr(MSR_STAR);
191         vmcb->lstar = read_msr(MSR_LSTAR);
192         vmcb->cstar = read_msr(MSR_CSTAR);
193         vmcb->sfmask = read_msr(MSR_SFMASK);
194         vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
195
196         vmcb->dr6 = 0x00000ff0;
197         vmcb->dr7 = 0x00000400;
198
199         /* Make the hypervisor visible */
200         vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
201
202         vmcb->g_pat = cpu_data->pat;
203
204         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
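        /* selective intercept: only CR0 writes changing bits other than
           CR0.TS or CR0.MP trigger this exit */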
205         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
206         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID;
207         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
208         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
209         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
210
211         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
212         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
213
214         /*
215          * We only intercept #DB and #AC to prevent malicious guests from
216          * triggering infinite loops in microcode (see e.g. CVE-2015-5307 and
217          * CVE-2015-8104).
218          */
219         vmcb->exception_intercepts |= (1 << DB_VECTOR) | (1 << AC_VECTOR);
220
221         vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
222
223         vmcb->np_enable = 1;
224         /* Only one guest ever owns this CPU, so a single ASID suffices */
225         vmcb->guest_asid = 1;
226
227         /* TODO: Setup AVIC */
228
229         /* Explicitly mark all of the state as new */
230         vmcb->clean_bits = 0;
231
232         svm_set_cell_config(cpu_data->cell, vmcb);
233 }
234
235 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
236                                      unsigned long gphys,
237                                      unsigned long flags)
238 {
239         return paging_virt2phys(&cpu_data->cell->arch.svm.npt_structs,
240                         gphys, flags);
241 }
242
243 static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
244 {
245         /* See APMv2, Section 15.25.5 */
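        /* The nested translation treats guest accesses as user-mode accesses,
           so the US bit must be set at every NPT level or the access faults */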
246         *pte = (next_pt & 0x000ffffffffff000UL) |
247                 (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
248 }
249
250 int vcpu_vendor_init(void)
251 {
252         struct paging_structures parking_pt;
253         unsigned long vm_cr;
254         int err, n;
255
256         err = svm_check_features();
257         if (err)
258                 return err;
259
260         vm_cr = read_msr(MSR_VM_CR);
261         if (vm_cr & VM_CR_SVMDIS)
262                 /* SVM disabled in BIOS */
263                 return trace_error(-EPERM);
264
265         /* The nested paging format is identical to native x86-64 paging */
266         memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
267         for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
268                 npt_paging[n].set_next_pt = npt_set_next_pt;
269
270         /* Map guest parking code (shared between cells and CPUs) */
271         parking_pt.root_paging = npt_paging;
272         parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
273         if (!parked_mode_npt)
274                 return -ENOMEM;
275         err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
276                             PAGE_SIZE, 0x000ff000,
277                             PAGE_READONLY_FLAGS | PAGE_FLAG_US,
278                             PAGING_NON_COHERENT);
279         if (err)
280                 return err;
281
282         /* This is currently always false on AMD hardware (except under
283            nested SVM); see APMv2, Sect. 16.3.1 */
284         if (using_x2apic) {
285                 /* allow direct x2APIC access except for ICR writes */
286                 memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
287                                 (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
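                /* 0x02 re-enables just the write-intercept bit of the ICR
                   (MSR 0x830) within its 4-MSR byte */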
288                 msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
289         } else {
290                 if (has_avic) {
291                         avic_page = page_alloc(&remap_pool, 1);
292                         if (!avic_page)
293                                 return trace_error(-ENOMEM);
294                 }
295         }
296
297         return vcpu_cell_init(&root_cell);
298 }
299
300 int vcpu_vendor_cell_init(struct cell *cell)
301 {
302         int err = -ENOMEM;
303         u64 flags;
304
305         /* allocate the IOPM */
306         cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES);
307         if (!cell->arch.svm.iopm)
308                 return err;
309
310         /* build root NPT of cell */
311         cell->arch.svm.npt_structs.root_paging = npt_paging;
312         cell->arch.svm.npt_structs.root_table =
313                 (page_table_t)cell->arch.root_table_page;
314
315         if (!has_avic) {
316                 /*
317                  * Map xAPIC as is; reads are passed, writes are trapped.
318                  */
319                 flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
320                 err = paging_create(&cell->arch.svm.npt_structs, XAPIC_BASE,
321                                     PAGE_SIZE, XAPIC_BASE,
322                                     flags,
323                                     PAGING_NON_COHERENT);
324         } else {
325                 flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
326                 err = paging_create(&cell->arch.svm.npt_structs,
327                                     paging_hvirt2phys(avic_page),
328                                     PAGE_SIZE, XAPIC_BASE,
329                                     flags,
330                                     PAGING_NON_COHERENT);
331         }
332         if (err)
333                 goto err_free_iopm;
334
335         return 0;
336
337 err_free_iopm:
338         page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
339
340         return err;
341 }
342
343 int vcpu_map_memory_region(struct cell *cell,
344                            const struct jailhouse_memory *mem)
345 {
346         u64 phys_start = mem->phys_start;
347         u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
348
349         if (mem->flags & JAILHOUSE_MEM_READ)
350                 flags |= PAGE_FLAG_PRESENT;
351         if (mem->flags & JAILHOUSE_MEM_WRITE)
352                 flags |= PAGE_FLAG_RW;
353         if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
354                 flags |= PAGE_FLAG_NOEXECUTE;
355         if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
356                 phys_start = paging_hvirt2phys(&cell->comm_page);
357
358         return paging_create(&cell->arch.svm.npt_structs, phys_start, mem->size,
359                              mem->virt_start, flags, PAGING_NON_COHERENT);
360 }
361
362 int vcpu_unmap_memory_region(struct cell *cell,
363                              const struct jailhouse_memory *mem)
364 {
365         return paging_destroy(&cell->arch.svm.npt_structs, mem->virt_start,
366                               mem->size, PAGING_NON_COHERENT);
367 }
368
369 void vcpu_vendor_cell_exit(struct cell *cell)
370 {
371         paging_destroy(&cell->arch.svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
372                        PAGING_NON_COHERENT);
373         page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
374 }
375
376 int vcpu_init(struct per_cpu *cpu_data)
377 {
378         unsigned long efer;
379         int err;
380
381         err = svm_check_features();
382         if (err)
383                 return err;
384
385         efer = read_msr(MSR_EFER);
386         if (efer & EFER_SVME)
387                 return trace_error(-EBUSY);
388
389         efer |= EFER_SVME;
390         write_msr(MSR_EFER, efer);
391
392         cpu_data->svm_state = SVMON;
393
394         vmcb_setup(cpu_data);
395
396         /*
397          * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
398          * set the values of reserved bits to the values found during the
399          * previous CR0 read."
400          * But we want to avoid surprises with new features unknown to us but
401          * set by Linux. So check whether any bit we assume to be reserved is
402          * set and bail out if so.
403          * Note that the APM defines all reserved CR4 bits as must-be-zero.
404          */
405         if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
406                 return -EIO;
407
408         /* bring CR0 and CR4 into well-defined states */
409         write_cr0(X86_CR0_HOST_STATE);
410         write_cr4(X86_CR4_HOST_STATE);
411
412         write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
413
414         return 0;
415 }
416
417 void vcpu_exit(struct per_cpu *cpu_data)
418 {
419         unsigned long efer;
420
421         if (cpu_data->svm_state == SVMOFF)
422                 return;
423
424         cpu_data->svm_state = SVMOFF;
425
426         /* We are leaving - set the GIF */
427         asm volatile ("stgi" : : : "memory");
428
429         efer = read_msr(MSR_EFER);
430         efer &= ~EFER_SVME;
431         write_msr(MSR_EFER, efer);
432
433         write_msr(MSR_VM_HSAVE_PA, 0);
434 }
435
436 void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
437 {
438         unsigned long vmcb_pa, host_stack;
439
440         vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
441         host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);
442
443         /* We enter Linux at the point arch_entry would return to.
444          * rax is cleared to signal success to the caller. */
445         asm volatile(
446                 "clgi\n\t"
447                 "mov (%%rdi),%%r15\n\t"
448                 "mov 0x8(%%rdi),%%r14\n\t"
449                 "mov 0x10(%%rdi),%%r13\n\t"
450                 "mov 0x18(%%rdi),%%r12\n\t"
451                 "mov 0x20(%%rdi),%%rbx\n\t"
452                 "mov 0x28(%%rdi),%%rbp\n\t"
453                 "mov %2,%%rsp\n\t"
454                 "vmload %%rax\n\t"
455                 "jmp svm_vmentry"
456                 : /* no output */
457                 : "D" (cpu_data->linux_reg), "a" (vmcb_pa), "m" (host_stack));
458         __builtin_unreachable();
459 }
460
461 void __attribute__((noreturn)) vcpu_deactivate_vmm(void)
462 {
463         struct per_cpu *cpu_data = this_cpu_data();
464         struct vmcb *vmcb = &cpu_data->vmcb;
465         unsigned long *stack = (unsigned long *)vmcb->rsp;
466         unsigned long linux_ip = vmcb->rip;
467
468         cpu_data->linux_cr0 = vmcb->cr0;
469         cpu_data->linux_cr3 = vmcb->cr3;
470
471         cpu_data->linux_gdtr.base = vmcb->gdtr.base;
472         cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
473         cpu_data->linux_idtr.base = vmcb->idtr.base;
474         cpu_data->linux_idtr.limit = vmcb->idtr.limit;
475
476         cpu_data->linux_cs.selector = vmcb->cs.selector;
477
478         asm volatile("str %0" : "=m" (cpu_data->linux_tss.selector));
479
480         cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
481         cpu_data->linux_fs.base = read_msr(MSR_FS_BASE);
482         cpu_data->linux_gs.base = vmcb->gs.base;
483
484         cpu_data->linux_ds.selector = vmcb->ds.selector;
485         cpu_data->linux_es.selector = vmcb->es.selector;
486
487         asm volatile("mov %%fs,%0" : "=m" (cpu_data->linux_fs.selector));
488         asm volatile("mov %%gs,%0" : "=m" (cpu_data->linux_gs.selector));
489
490         arch_cpu_restore(cpu_data, 0);
491
492         stack--;
493         *stack = linux_ip;
494
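        /*
         * Unwind the guest register frame (layout r15 ... rcx, rax, matching
         * the by_index[] usage elsewhere): the "add $8" skips the unused rsp
         * slot, and rax is not popped - rsp is switched to the prepared Linux
         * stack and rax cleared so that the original caller of arch_entry
         * sees success when we "ret" to linux_ip.
         */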
495         asm volatile (
496                 "mov %%rbx,%%rsp\n\t"
497                 "pop %%r15\n\t"
498                 "pop %%r14\n\t"
499                 "pop %%r13\n\t"
500                 "pop %%r12\n\t"
501                 "pop %%r11\n\t"
502                 "pop %%r10\n\t"
503                 "pop %%r9\n\t"
504                 "pop %%r8\n\t"
505                 "pop %%rdi\n\t"
506                 "pop %%rsi\n\t"
507                 "pop %%rbp\n\t"
508                 "add $8,%%rsp\n\t"
509                 "pop %%rbx\n\t"
510                 "pop %%rdx\n\t"
511                 "pop %%rcx\n\t"
512                 "mov %%rax,%%rsp\n\t"
513                 "xor %%rax,%%rax\n\t"
514                 "ret"
515                 : : "a" (stack), "b" (&cpu_data->guest_regs));
516         __builtin_unreachable();
517 }
518
519 void vcpu_vendor_reset(unsigned int sipi_vector)
520 {
521         static const struct svm_segment dataseg_reset_state = {
522                 .selector = 0,
523                 .base = 0,
524                 .limit = 0xffff,
525                 .access_rights = 0x0093,
526         };
527         static const struct svm_segment dtr_reset_state = {
528                 .selector = 0,
529                 .base = 0,
530                 .limit = 0xffff,
531                 .access_rights = 0,
532         };
533         struct per_cpu *cpu_data = this_cpu_data();
534         struct vmcb *vmcb = &cpu_data->vmcb;
535         unsigned long val;
536
537         vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
538         vmcb->cr3 = 0;
539         vmcb->cr4 = 0;
540
541         vmcb->rflags = 0x02;
542
543         val = 0;
544         if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
545                 val = 0xfff0;
546                 sipi_vector = 0xf0;
547         }
548         vmcb->rip = val;
549         vmcb->rsp = 0;
550
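        /*
         * Mimic INIT/SIPI: an AP starts with CS selector = vector << 8,
         * CS base = vector << 12 and IP = 0; the BSP pseudo-SIPI instead
         * emulates the reset vector f000:fff0.
         */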
551         vmcb->cs.selector = sipi_vector << 8;
552         vmcb->cs.base = sipi_vector << 12;
553         vmcb->cs.limit = 0xffff;
554         vmcb->cs.access_rights = 0x009b;
555
556         vmcb->ds = dataseg_reset_state;
557         vmcb->es = dataseg_reset_state;
558         vmcb->fs = dataseg_reset_state;
559         vmcb->gs = dataseg_reset_state;
560         vmcb->ss = dataseg_reset_state;
561
562         vmcb->tr.selector = 0;
563         vmcb->tr.base = 0;
564         vmcb->tr.limit = 0xffff;
565         vmcb->tr.access_rights = 0x008b;
566
567         vmcb->ldtr.selector = 0;
568         vmcb->ldtr.base = 0;
569         vmcb->ldtr.limit = 0xffff;
570         vmcb->ldtr.access_rights = 0x0082;
571
572         vmcb->gdtr = dtr_reset_state;
573         vmcb->idtr = dtr_reset_state;
574
575         vmcb->efer = EFER_SVME;
576
577         /* These MSRs are undefined on reset */
578         vmcb->star = 0;
579         vmcb->lstar = 0;
580         vmcb->cstar = 0;
581         vmcb->sfmask = 0;
582         vmcb->sysenter_cs = 0;
583         vmcb->sysenter_eip = 0;
584         vmcb->sysenter_esp = 0;
585         vmcb->kerngsbase = 0;
586
587         vmcb->dr7 = 0x00000400;
588
589         vmcb->eventinj = 0;
590
591         /* Almost all of the guest state changed */
592         vmcb->clean_bits = 0;
593
594         svm_set_cell_config(cpu_data->cell, vmcb);
595
596         asm volatile(
597                 "vmload %%rax"
598                 : : "a" (paging_hvirt2phys(vmcb)) : "memory");
599         /* vmload overwrites GS_BASE - restore the host state */
600         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
601 }
602
603 void vcpu_skip_emulated_instruction(unsigned int inst_len)
604 {
605         this_cpu_data()->vmcb.rip += inst_len;
606 }
607
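/*
 * Emulate the hardware side effect of enabling paging while EFER.LME is set:
 * EFER.LMA would be set automatically, but since we intercept the CR0 write
 * we have to do it ourselves.
 */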
608 static void update_efer(struct vmcb *vmcb)
609 {
610         unsigned long efer = vmcb->efer;
611
612         if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
613                 return;
614
615         efer |= EFER_LMA;
616
617         /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
618         if ((vmcb->efer ^ efer) & EFER_LMA)
619                 vcpu_tlb_flush();
620
621         vmcb->efer = efer;
622         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
623 }
624
625 bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
626 {
627         struct vmcb *vmcb = &this_cpu_data()->vmcb;
628
629         if (vmcb->efer & EFER_LMA) {
630                 pg_structs->root_paging = x86_64_paging;
631                 pg_structs->root_table_gphys =
632                         vmcb->cr3 & 0x000ffffffffff000UL;
633         } else if ((vmcb->cr0 & X86_CR0_PG) &&
634                    !(vmcb->cr4 & X86_CR4_PAE)) {
635                 pg_structs->root_paging = i386_paging;
636                 pg_structs->root_table_gphys =
637                         vmcb->cr3 & 0xfffff000UL;
638         } else if (!(vmcb->cr0 & X86_CR0_PG)) {
639                 /*
640                  * The guest may also be in non-paged protected mode, but
641                  * the translation mechanism stays the same anyway.
642                  */
643                 pg_structs->root_paging = realmode_paging;
644                 /*
645                  * This will make paging_get_guest_pages map the page
646                  * that also contains the bootstrap code and, thus, is
647                  * always present in a cell.
648                  */
649                 pg_structs->root_table_gphys = 0xff000;
650         } else {
651                 printk("FATAL: Unsupported paging mode\n");
652                 return false;
653         }
654         return true;
655 }
656
657 void vcpu_vendor_set_guest_pat(unsigned long val)
658 {
659         struct vmcb *vmcb = &this_cpu_data()->vmcb;
660
661         vmcb->g_pat = val;
662         vmcb->clean_bits &= ~CLEAN_BITS_NP;
663 }
664
665 struct parse_context {
666         unsigned int remaining;
667         unsigned int size;
668         unsigned long cs_base;
669         const u8 *inst;
670 };
671
672 static bool ctx_advance(struct parse_context *ctx,
673                         unsigned long *pc,
674                         struct guest_paging_structures *pg_structs)
675 {
676         if (!ctx->size) {
677                 ctx->size = ctx->remaining;
678                 ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
679                                           &ctx->size);
680                 if (!ctx->inst)
681                         return false;
682                 ctx->remaining -= ctx->size;
683                 *pc += ctx->size;
684         }
685         return true;
686 }
687
688 static bool svm_parse_mov_to_cr(struct vmcb *vmcb, unsigned long pc,
689                                 unsigned char reg, unsigned long *gpr)
690 {
691         struct guest_paging_structures pg_structs;
692         struct parse_context ctx = {};
693         /* No prefixes are supported yet */
694         u8 opcodes[] = {0x0f, 0x22}, modrm;
695         int n;
696
697         ctx.remaining = ARRAY_SIZE(opcodes);
698         if (!vcpu_get_guest_paging_structs(&pg_structs))
699                 return false;
700         ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;
701
702         if (!ctx_advance(&ctx, &pc, &pg_structs))
703                 return false;
704
705         for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++)
706                 if (*(ctx.inst) != opcodes[n] ||
707                     !ctx_advance(&ctx, &pc, &pg_structs))
708                         return false;
709
710         if (!ctx_advance(&ctx, &pc, &pg_structs))
711                 return false;
712
713         modrm = *(ctx.inst);
714
715         if (((modrm & 0x38) >> 3) != reg)
716                 return false;
717
718         if (gpr)
719                 *gpr = (modrm & 0x7);
720
721         return true;
722 }
723
724 /*
725  * XXX: The only visible reason to have this function (vmx.c consistency
726  * aside) is to prevent cells from setting invalid CD+NW combinations that
727  * would merely result in VMEXIT_INVALID. Maybe we can get along without it
728  * altogether?
729  */
730 static bool svm_handle_cr(struct per_cpu *cpu_data)
731 {
732         struct vmcb *vmcb = &cpu_data->vmcb;
733         /* Work around a GCC 4.8 warning about uninitialized variable 'reg' */
734         unsigned long reg = -1, val, bits;
735
736         if (has_assists) {
737                 if (!(vmcb->exitinfo1 & (1UL << 63))) {
738                         panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
739                         return false;
740                 }
741                 reg = vmcb->exitinfo1 & 0x07;
742         } else {
743                 if (!svm_parse_mov_to_cr(vmcb, vmcb->rip, 0, &reg)) {
744                         panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
745                         return false;
746                 }
747         }
748
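        /*
         * reg is the GPR number encoded in the instruction (0 = rax,
         * 4 = rsp, ...); guest_regs stores the registers in reverse order
         * (r15 first), hence by_index[15 - reg]. rsp is not part of
         * guest_regs and has to be taken from the VMCB instead.
         */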
749         if (reg == 4)
750                 val = vmcb->rsp;
751         else
752                 val = cpu_data->guest_regs.by_index[15 - reg];
753
754         vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
755         /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
756         bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
757         if ((val ^ vmcb->cr0) & bits)
758                 vcpu_tlb_flush();
759         /* TODO: better check for #GP reasons */
760         vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
761         if (val & X86_CR0_PG)
762                 update_efer(vmcb);
763         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
764
765         return true;
766 }
767
768 static bool svm_handle_msr_write(struct per_cpu *cpu_data)
769 {
770         struct vmcb *vmcb = &cpu_data->vmcb;
771         unsigned long efer;
772
773         if (cpu_data->guest_regs.rcx == MSR_EFER) {
774                 /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
775                 efer = get_wrmsr_value(&cpu_data->guest_regs) | EFER_SVME;
776                 /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
777                 if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
778                         vcpu_tlb_flush();
779                 vmcb->efer = efer;
780                 vmcb->clean_bits &= ~CLEAN_BITS_CRX;
781                 vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
782                 return true;
783         }
784
785         return vcpu_handle_msr_write();
786 }
787
788 /*
789  * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
790  * be treated separately in svm_handle_avic_access().
791  */
792 static bool svm_handle_apic_access(struct vmcb *vmcb)
793 {
794         struct guest_paging_structures pg_structs;
795         unsigned int inst_len, offset;
796         bool is_write;
797
798         /* The caller is responsible for sanity checks */
799         is_write = !!(vmcb->exitinfo1 & 0x2);
800         offset = vmcb->exitinfo2 - XAPIC_BASE;
801
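        /* xAPIC registers sit on 16-byte boundaries; reject other accesses
           and pass the register index (offset >> 4) on */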
802         if (offset & 0x00f)
803                 goto out_err;
804
805         if (!vcpu_get_guest_paging_structs(&pg_structs))
806                 goto out_err;
807
808         inst_len = apic_mmio_access(vmcb->rip, &pg_structs, offset >> 4,
809                                     is_write);
810         if (!inst_len)
811                 goto out_err;
812
813         vcpu_skip_emulated_instruction(inst_len);
814         return true;
815
816 out_err:
817         panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
818                      offset, is_write);
819         return false;
820 }
821
822 static void dump_guest_regs(union registers *guest_regs, struct vmcb *vmcb)
823 {
824         panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
825                      vmcb->rsp, vmcb->rflags);
826         panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
827                      guest_regs->rbx, guest_regs->rcx);
828         panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
829                      guest_regs->rsi, guest_regs->rdi);
830         panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
831                      vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
832                      !!(vmcb->efer & EFER_LMA));
833         panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
834                      vmcb->cr3, vmcb->cr4);
835         panic_printk("EFER: %p\n", vmcb->efer);
836 }
837
838 void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *io)
839 {
840         struct vmcb *vmcb = &this_cpu_data()->vmcb;
841         u64 exitinfo = vmcb->exitinfo1;
842
843         /* parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
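        /* exitinfo1: bit 0 = IN, bits 3:2 = REP/string, bits 6:4 = one-hot
           operand size, bits 31:16 = port number; exitinfo2 holds the rIP of
           the next instruction, which yields the instruction length */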
844         io->port = (exitinfo >> 16) & 0xFFFF;
845         io->size = (exitinfo >> 4) & 0x7;
846         io->in = !!(exitinfo & 0x1);
847         io->inst_len = vmcb->exitinfo2 - vmcb->rip;
848         io->rep_or_str = !!(exitinfo & 0x0c);
849 }
850
851 void vcpu_vendor_get_mmio_intercept(struct vcpu_mmio_intercept *mmio)
852 {
853         struct vmcb *vmcb = &this_cpu_data()->vmcb;
854
855         mmio->phys_addr = vmcb->exitinfo2;
856         mmio->is_write = !!(vmcb->exitinfo1 & 0x2);
857 }
858
859 void vcpu_handle_exit(struct per_cpu *cpu_data)
860 {
861         struct vmcb *vmcb = &cpu_data->vmcb;
862         bool res = false;
863
864         vmcb->gs.base = read_msr(MSR_GS_BASE);
865
866         /* Restore GS value expected by per_cpu data accessors */
867         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
868
869         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
870         /*
871          * All guest state is marked unmodified; individual handlers must clear
872          * the bits as needed.
873          */
874         vmcb->clean_bits = 0xffffffff;
875
876         switch (vmcb->exitcode) {
877         case VMEXIT_INVALID:
878                 panic_printk("FATAL: VM-Entry failure, error %d\n",
879                              vmcb->exitcode);
880                 break;
881         case VMEXIT_NMI:
882                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
883                 /* Temporarily enable GIF to consume pending NMI */
884                 asm volatile("stgi; clgi" : : : "memory");
885                 x86_check_events();
886                 goto vmentry;
887         case VMEXIT_VMMCALL:
888                 vcpu_handle_hypercall();
889                 goto vmentry;
890         case VMEXIT_CR0_SEL_WRITE:
891                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
892                 if (svm_handle_cr(cpu_data))
893                         goto vmentry;
894                 break;
895         case VMEXIT_CPUID:
896                 vcpu_handle_cpuid();
897                 goto vmentry;
898         case VMEXIT_MSR:
899                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
900                 if (!vmcb->exitinfo1)
901                         res = vcpu_handle_msr_read();
902                 else
903                         res = svm_handle_msr_write(cpu_data);
904                 if (res)
905                         goto vmentry;
906                 break;
907         case VMEXIT_NPF:
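                /* exitinfo1 carries a #PF-style error code; P|RW|US (0x7) on
                   the read-only xAPIC mapping indicates a trapped guest
                   write */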
908                 if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
909                      vmcb->exitinfo2 >= XAPIC_BASE &&
910                      vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
911                         /* APIC access in non-AVIC mode */
912                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
913                         if (svm_handle_apic_access(vmcb))
914                                 goto vmentry;
915                 } else {
916                         /* General MMIO (IOAPIC, PCI etc) */
917                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
918                         if (vcpu_handle_mmio_access())
919                                 goto vmentry;
920                 }
921                 break;
922         case VMEXIT_XSETBV:
923                 if (vcpu_handle_xsetbv())
924                         goto vmentry;
925                 break;
926         case VMEXIT_IOIO:
927                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
928                 if (vcpu_handle_io_access())
929                         goto vmentry;
930                 break;
931         case VMEXIT_EXCEPTION_DB:
932         case VMEXIT_EXCEPTION_AC:
933                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_EXCEPTION]++;
934                 /* Reinject exception, including error code if needed. */
935                 vmcb->eventinj = (vmcb->exitcode - VMEXIT_EXCEPTION_DE) |
936                         SVM_EVENTINJ_EXCEPTION | SVM_EVENTINJ_VALID;
937                 if (vmcb->exitcode == VMEXIT_EXCEPTION_AC) {
938                         vmcb->eventinj |= SVM_EVENTINJ_ERR_VALID;
939                         vmcb->eventinj_err = vmcb->exitinfo1;
940                 }
941                 x86_check_events();
942                 goto vmentry;
943         /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
944         default:
945                 panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
946                              "exitinfo1 %p exitinfo2 %p\n",
947                              vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
948         }
949         dump_guest_regs(&cpu_data->guest_regs, vmcb);
950         panic_park();
951
952 vmentry:
953         write_msr(MSR_GS_BASE, vmcb->gs.base);
954 }
955
956 void vcpu_park(void)
957 {
958         vcpu_vendor_reset(APIC_BSP_PSEUDO_SIPI);
959         /* No need to clear the VMCB clean bits: vcpu_vendor_reset() already
960          * did that. */
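        /* Switch to the dedicated NPT that maps nothing but the parking page
           at the reset vector */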
961         this_cpu_data()->vmcb.n_cr3 = paging_hvirt2phys(parked_mode_npt);
962
963         vcpu_tlb_flush();
964 }
965
966 void vcpu_nmi_handler(void)
967 {
968 }
969
970 void vcpu_tlb_flush(void)
971 {
972         struct vmcb *vmcb = &this_cpu_data()->vmcb;
973
974         if (has_flush_by_asid)
975                 vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
976         else
977                 vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
978 }
979
980 const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
981                               unsigned long pc, unsigned int *size)
982 {
983         struct vmcb *vmcb = &this_cpu_data()->vmcb;
984         unsigned long start;
985
986         if (has_assists) {
987                 if (!*size)
988                         return NULL;
989                 start = vmcb->rip - pc;
990                 if (start < vmcb->bytes_fetched) {
991                         *size = vmcb->bytes_fetched - start;
992                         return &vmcb->guest_bytes[start];
993                 } else {
994                         return NULL;
995                 }
996         } else {
997                 return vcpu_map_inst(pg_structs, pc, size);
998         }
999 }
1000
1001 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
1002                                     struct vcpu_io_bitmap *iobm)
1003 {
1004         iobm->data = cell->arch.svm.iopm;
1005         iobm->size = IOPM_PAGES * PAGE_SIZE;
1006 }
1007
1008 void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
1009 {
1010         struct vmcb *vmcb = &this_cpu_data()->vmcb;
1011
1012         x_state->efer = vmcb->efer;
1013         x_state->rflags = vmcb->rflags;
1014         x_state->cs = vmcb->cs.selector;
1015         x_state->rip = vmcb->rip;
1016 }
1017
1018 /* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
1019 void enable_irq(void)
1020 {
1021         asm volatile("stgi; sti" : : : "memory");
1022 }
1023
1024 /* Jailhouse runs with GIF cleared, so we need to restore this state */
1025 void disable_irq(void)
1026 {
1027         asm volatile("cli; clgi" : : : "memory");
1028 }