hypervisor/arch/x86/svm.c (jailhouse.git, commit: x86: Unify AMD page tables for CPU and IOMMU)
1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  * Copyright (c) Valentine Sinitsyn, 2014
6  *
7  * Authors:
8  *  Jan Kiszka <jan.kiszka@siemens.com>
9  *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
10  *
11  * Based on vmx.c written by Jan Kiszka.
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  */
16
17 #include <jailhouse/entry.h>
18 #include <jailhouse/cell.h>
19 #include <jailhouse/cell-config.h>
20 #include <jailhouse/control.h>
21 #include <jailhouse/paging.h>
22 #include <jailhouse/printk.h>
23 #include <jailhouse/processor.h>
24 #include <jailhouse/string.h>
25 #include <jailhouse/utils.h>
26 #include <asm/amd_iommu.h>
27 #include <asm/apic.h>
28 #include <asm/control.h>
29 #include <asm/iommu.h>
30 #include <asm/paging.h>
31 #include <asm/percpu.h>
32 #include <asm/processor.h>
33 #include <asm/svm.h>
34 #include <asm/vcpu.h>
35
36 /*
37  * The NW bit is ignored by all modern processors; however, some
38  * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
39  * Sect. 15.5). To handle this, we always keep the NW bit off.
40  */
41 #define SVM_CR0_ALLOWED_BITS            (~X86_CR0_NW)
42
43 /* IOPM size: one intercept bit per I/O port -> two 4-K pages, plus 3 extra bits, so 3 pages */
44 #define IOPM_PAGES                      3
45
46 #define NPT_IOMMU_PAGE_DIR_LEVELS       4
47
48 static bool has_avic, has_assists, has_flush_by_asid;
49
50 static const struct segment invalid_seg;
51
52 static struct paging npt_iommu_paging[NPT_IOMMU_PAGE_DIR_LEVELS];
53
54 /* bit cleared: direct access allowed */
55 // TODO: convert to whitelist
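/*
 * MSRPM encoding (APMv2, Sect. 15.11): each MSR is covered by two
 * consecutive bits, the lower one intercepting reads and the higher one
 * intercepting writes, so every byte below describes four MSRs. A value
 * of 0x80, for example, intercepts only writes to the fourth MSR of the
 * corresponding group.
 */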
56 static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
57         [ SVM_MSRPM_0000 ] = {
58                 [      0/4 ...  0x017/4 ] = 0,
59                 [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
60                 [  0x01c/4 ...  0x1ff/4 ] = 0,
61                 [  0x200/4 ...  0x273/4 ] = 0xaa, /* 0x200 - 0x273 (w) */
62                 [  0x274/4 ...  0x277/4 ] = 0xea, /* 0x274 - 0x276 (w), 0x277 (rw) */
63                 [  0x278/4 ...  0x2fb/4 ] = 0,
64                 [  0x2fc/4 ...  0x2ff/4 ] = 0x80, /* 0x2ff (w) */
65                 [  0x300/4 ...  0x7ff/4 ] = 0,
66                 /* x2APIC MSRs - emulated if not present */
67                 [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
68                 [  0x804/4 ...  0x807/4 ] = 0,
69                 [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
70                 [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
71                 [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
72                 [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
73                 [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
74                 [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
75                 [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
76                 [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
77                 [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
78                 [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
79                 [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
80                 [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
81                 [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
82                 [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
83                 [  0x840/4 ... 0x1fff/4 ] = 0,
84         },
85         [ SVM_MSRPM_C000 ] = {
86                 [      0/4 ...  0x07f/4 ] = 0,
87                 [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
88                 [  0x084/4 ... 0x1fff/4 ] = 0
89         },
90         [ SVM_MSRPM_C001 ] = {
91                 [      0/4 ... 0x1fff/4 ] = 0,
92         },
93         [ SVM_MSRPM_RESV ] = {
94                 [      0/4 ... 0x1fff/4 ] = 0,
95         }
96 };
97
98 /* This page is mapped so the code begins at 0x000ffff0 */
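/* A parked CPU is reset to CS:IP = f000:fff0 (see vcpu_park() and
   vcpu_vendor_reset()), which lands on the cli/hlt/jmp loop below. */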
99 static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
100         [0xff0] = 0xfa, /* 1: cli */
101         [0xff1] = 0xf4, /*    hlt */
102         [0xff2] = 0xeb,
103         [0xff3] = 0xfc  /*    jmp 1b */
104 };
105
106 static void *parked_mode_npt;
107
108 static void *avic_page;
109
110 static int svm_check_features(void)
111 {
112         /* SVM is available */
113         if (!(cpuid_ecx(0x80000001, 0) & X86_FEATURE_SVM))
114                 return trace_error(-ENODEV);
115
116         /* Nested paging */
117         if (!(cpuid_edx(0x8000000A, 0) & X86_FEATURE_NP))
118                 return trace_error(-EIO);
119
120         /* Decode assists */
121         if ((cpuid_edx(0x8000000A, 0) & X86_FEATURE_DECODE_ASSISTS))
122                 has_assists = true;
123
124         /* AVIC support */
125         /* FIXME: Jailhouse support is incomplete so far
126         if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_AVIC)
127                 has_avic = true; */
128
129         /* TLB Flush by ASID support */
130         if (cpuid_edx(0x8000000A, 0) & X86_FEATURE_FLUSH_BY_ASID)
131                 has_flush_by_asid = true;
132
133         return 0;
134 }
135
136 static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
137                                      const struct desc_table_reg *dtr)
138 {
139         svm_segment->base = dtr->base;
140         svm_segment->limit = dtr->limit & 0xffff;
141 }
142
143 static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
144                                          const struct segment *segment)
145 {
146         svm_segment->selector = segment->selector;
147         svm_segment->access_rights = ((segment->access_rights & 0xf000) >> 4) |
148                 (segment->access_rights & 0x00ff);
149         svm_segment->limit = segment->limit;
150         svm_segment->base = segment->base;
151 }
152
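/*
 * The cell's unified page table serves as both the nested page table
 * (n_cr3) for the CPU and the root table for the AMD IOMMU.
 */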
153 static void svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
154 {
155         vmcb->iopm_base_pa = paging_hvirt2phys(cell->arch.svm.iopm);
156         vmcb->n_cr3 =
157                 paging_hvirt2phys(cell->arch.svm.npt_iommu_structs.root_table);
158 }
159
160 static void vmcb_setup(struct per_cpu *cpu_data)
161 {
162         struct vmcb *vmcb = &cpu_data->vmcb;
163
164         memset(vmcb, 0, sizeof(struct vmcb));
165
166         vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
167         vmcb->cr3 = cpu_data->linux_cr3;
168         vmcb->cr4 = cpu_data->linux_cr4;
169
170         set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
171         set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
172         set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
173         set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
174         set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
175         set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
176         set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
177         set_svm_segment_from_segment(&vmcb->ldtr, &invalid_seg);
178
179         set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
180         set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
181
182         vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
183
184         vmcb->rflags = 0x02;
185         /* Indicate success to the caller of arch_entry */
186         vmcb->rax = 0;
187         vmcb->rsp = cpu_data->linux_sp +
188                 (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
189         vmcb->rip = cpu_data->linux_ip;
190
191         vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
192         vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
193         vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
194         vmcb->star = read_msr(MSR_STAR);
195         vmcb->lstar = read_msr(MSR_LSTAR);
196         vmcb->cstar = read_msr(MSR_CSTAR);
197         vmcb->sfmask = read_msr(MSR_SFMASK);
198         vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
199
200         vmcb->dr6 = 0x00000ff0;
201         vmcb->dr7 = 0x00000400;
202
203         /* Make the hypervisor visible */
204         vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
205
206         vmcb->g_pat = cpu_data->pat;
207
208         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
209         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
210         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID;
211         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
212         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
213         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
214
215         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
216         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
217
218         /*
219  * We only intercept #DB and #AC to prevent malicious guests from
220  * triggering infinite loops in microcode (see e.g. CVE-2015-5307 and
221  * CVE-2015-8104).
222          */
223         vmcb->exception_intercepts |= (1 << DB_VECTOR) | (1 << AC_VECTOR);
224
225         vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
226
227         vmcb->np_enable = 1;
228         /* No more than one guest owns the CPU */
229         vmcb->guest_asid = 1;
230
231         /* TODO: Setup AVIC */
232
233         /* Explicitly mark all of the state as new */
234         vmcb->clean_bits = 0;
235
236         svm_set_cell_config(cpu_data->cell, vmcb);
237 }
238
239 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
240                                      unsigned long gphys,
241                                      unsigned long flags)
242 {
243         return paging_virt2phys(&cpu_data->cell->arch.svm.npt_iommu_structs,
244                                 gphys, flags);
245 }
246
247 static void npt_iommu_set_next_pt_l4(pt_entry_t pte, unsigned long next_pt)
248 {
249         /*
250  * Merge IOMMU and NPT flags. We need to mark the NPT entries as user
251  * accessible; see APMv2, Section 15.25.5.
252          */
253         *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(3) |
254                 AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
255                 PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
256 }
257
258 static void npt_iommu_set_next_pt_l3(pt_entry_t pte, unsigned long next_pt)
259 {
260         *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(2) |
261                 AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
262                 PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
263 }
264
265 static void npt_iommu_set_next_pt_l2(pt_entry_t pte, unsigned long next_pt)
266 {
267         *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(1) |
268                 AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P |
269                 PAGE_DEFAULT_FLAGS | PAGE_FLAG_US;
270 }
271
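/*
 * A non-zero page-mode (next-level) field means the entry references
 * another page table rather than mapping a large page, so there is no
 * final physical address to return at this level.
 */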
272 static unsigned long npt_iommu_get_phys_l3(pt_entry_t pte, unsigned long virt)
273 {
274         if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
275                 return INVALID_PHYS_ADDR;
276         return (*pte & BIT_MASK(51, 30)) | (virt & BIT_MASK(29, 0));
277 }
278
279 static unsigned long npt_iommu_get_phys_l2(pt_entry_t pte, unsigned long virt)
280 {
281         if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
282                 return INVALID_PHYS_ADDR;
283         return (*pte & BIT_MASK(51, 21)) | (virt & BIT_MASK(20, 0));
284 }
285
286 int vcpu_vendor_init(void)
287 {
288         struct paging_structures parking_pt;
289         unsigned long vm_cr;
290         int err;
291
292         err = svm_check_features();
293         if (err)
294                 return err;
295
296         vm_cr = read_msr(MSR_VM_CR);
297         if (vm_cr & VM_CR_SVMDIS)
298                 /* SVM disabled in BIOS */
299                 return trace_error(-EPERM);
300
301         /*
302          * Nested paging is almost the same as the native one. However, we
303          * need to override some handlers in order to reuse the page table for
304          * the IOMMU as well.
305          */
306         memcpy(npt_iommu_paging, x86_64_paging, sizeof(npt_iommu_paging));
307         npt_iommu_paging[0].set_next_pt = npt_iommu_set_next_pt_l4;
308         npt_iommu_paging[1].set_next_pt = npt_iommu_set_next_pt_l3;
309         npt_iommu_paging[2].set_next_pt = npt_iommu_set_next_pt_l2;
310         npt_iommu_paging[1].get_phys = npt_iommu_get_phys_l3;
311         npt_iommu_paging[2].get_phys = npt_iommu_get_phys_l2;
312
313         /* Map guest parking code (shared between cells and CPUs) */
314         parking_pt.root_paging = npt_iommu_paging;
315         parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
316         if (!parked_mode_npt)
317                 return -ENOMEM;
318         err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
319                             PAGE_SIZE, 0x000ff000,
320                             PAGE_READONLY_FLAGS | PAGE_FLAG_US,
321                             PAGING_NON_COHERENT);
322         if (err)
323                 return err;
324
325         /* This is always false for AMD now (except in nested SVM);
326            see Sect. 16.3.1 in APMv2 */
327         if (using_x2apic) {
328                 /* allow direct x2APIC access except for ICR writes */
329                 memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
330                                 (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
331                 msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
332         } else {
333                 if (has_avic) {
334                         avic_page = page_alloc(&remap_pool, 1);
335                         if (!avic_page)
336                                 return trace_error(-ENOMEM);
337                 }
338         }
339
340         return vcpu_cell_init(&root_cell);
341 }
342
343 int vcpu_vendor_cell_init(struct cell *cell)
344 {
345         int err = -ENOMEM;
346         u64 flags;
347
348         /* allocate the I/O permission map (IOPM) */
349         cell->arch.svm.iopm = page_alloc(&mem_pool, IOPM_PAGES);
350         if (!cell->arch.svm.iopm)
351                 return err;
352
353         /* build root NPT of cell */
354         cell->arch.svm.npt_iommu_structs.root_paging = npt_iommu_paging;
355         cell->arch.svm.npt_iommu_structs.root_table =
356                 (page_table_t)cell->arch.root_table_page;
357
358         if (!has_avic) {
359                 /*
360                  * Map xAPIC as is; reads are passed, writes are trapped.
361                  */
362                 flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
363                 err = paging_create(&cell->arch.svm.npt_iommu_structs,
364                                     XAPIC_BASE, PAGE_SIZE, XAPIC_BASE,
365                                     flags, PAGING_NON_COHERENT);
366         } else {
367                 flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
368                 err = paging_create(&cell->arch.svm.npt_iommu_structs,
369                                     paging_hvirt2phys(avic_page),
370                                     PAGE_SIZE, XAPIC_BASE,
371                                     flags, PAGING_NON_COHERENT);
372         }
373         if (err)
374                 goto err_free_iopm;
375
376         return 0;
377
378 err_free_iopm:
379         page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
380
381         return err;
382 }
383
384 int vcpu_map_memory_region(struct cell *cell,
385                            const struct jailhouse_memory *mem)
386 {
387         u64 phys_start = mem->phys_start;
388         u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
389
390         if (mem->flags & JAILHOUSE_MEM_READ)
391                 flags |= PAGE_FLAG_PRESENT;
392         if (mem->flags & JAILHOUSE_MEM_WRITE)
393                 flags |= PAGE_FLAG_RW;
394         if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
395                 flags |= PAGE_FLAG_NOEXECUTE;
396         if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
397                 phys_start = paging_hvirt2phys(&cell->comm_page);
398
399         flags |= amd_iommu_get_memory_region_flags(mem);
400
401         /*
402          * As we also manipulate the IOMMU page table, changes need to be
403          * coherent.
404          */
405         return paging_create(&cell->arch.svm.npt_iommu_structs, phys_start,
406                              mem->size, mem->virt_start, flags,
407                              PAGING_COHERENT);
408 }
409
410 int vcpu_unmap_memory_region(struct cell *cell,
411                              const struct jailhouse_memory *mem)
412 {
413         return paging_destroy(&cell->arch.svm.npt_iommu_structs,
414                               mem->virt_start, mem->size, PAGING_COHERENT);
415 }
416
417 void vcpu_vendor_cell_exit(struct cell *cell)
418 {
419         paging_destroy(&cell->arch.svm.npt_iommu_structs, XAPIC_BASE,
420                        PAGE_SIZE, PAGING_NON_COHERENT);
421         page_free(&mem_pool, cell->arch.svm.iopm, IOPM_PAGES);
422 }
423
424 int vcpu_init(struct per_cpu *cpu_data)
425 {
426         unsigned long efer;
427         int err;
428
429         err = svm_check_features();
430         if (err)
431                 return err;
432
433         efer = read_msr(MSR_EFER);
434         if (efer & EFER_SVME)
435                 return trace_error(-EBUSY);
436
437         efer |= EFER_SVME;
438         write_msr(MSR_EFER, efer);
439
440         cpu_data->svm_state = SVMON;
441
442         vmcb_setup(cpu_data);
443
444         /*
445          * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
446          * set the values of reserved bits to the values found during the
447          * previous CR0 read."
448          * But we want to avoid surprises with new features unknown to us but
449  * set by Linux. So check if any assumed reserved bit was set and bail
450          * out if so.
451          * Note that the APM defines all reserved CR4 bits as must-be-zero.
452          */
453         if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
454                 return -EIO;
455
456         /* bring CR0 and CR4 into well-defined states */
457         write_cr0(X86_CR0_HOST_STATE);
458         write_cr4(X86_CR4_HOST_STATE);
459
460         write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
461
462         return 0;
463 }
464
465 void vcpu_exit(struct per_cpu *cpu_data)
466 {
467         unsigned long efer;
468
469         if (cpu_data->svm_state == SVMOFF)
470                 return;
471
472         cpu_data->svm_state = SVMOFF;
473
474         /* We are leaving - set the GIF */
475         asm volatile ("stgi" : : : "memory");
476
477         efer = read_msr(MSR_EFER);
478         efer &= ~EFER_SVME;
479         write_msr(MSR_EFER, efer);
480
481         write_msr(MSR_VM_HSAVE_PA, 0);
482 }
483
484 void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
485 {
486         unsigned long vmcb_pa, host_stack;
487
488         vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
489         host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);
490
491         /* We enter Linux at the point arch_entry would return to as well.
492          * rax is cleared to signal success to the caller. */
493         asm volatile(
494                 "clgi\n\t"
495                 "mov (%%rdi),%%r15\n\t"
496                 "mov 0x8(%%rdi),%%r14\n\t"
497                 "mov 0x10(%%rdi),%%r13\n\t"
498                 "mov 0x18(%%rdi),%%r12\n\t"
499                 "mov 0x20(%%rdi),%%rbx\n\t"
500                 "mov 0x28(%%rdi),%%rbp\n\t"
501                 "mov %2,%%rsp\n\t"
502                 "vmload %%rax\n\t"
503                 "jmp svm_vmentry"
504                 : /* no output */
505                 : "D" (cpu_data->linux_reg), "a" (vmcb_pa), "m" (host_stack));
506         __builtin_unreachable();
507 }
508
509 void __attribute__((noreturn)) vcpu_deactivate_vmm(void)
510 {
511         struct per_cpu *cpu_data = this_cpu_data();
512         struct vmcb *vmcb = &cpu_data->vmcb;
513         unsigned long *stack = (unsigned long *)vmcb->rsp;
514         unsigned long linux_ip = vmcb->rip;
515
516         cpu_data->linux_cr0 = vmcb->cr0;
517         cpu_data->linux_cr3 = vmcb->cr3;
518
519         cpu_data->linux_gdtr.base = vmcb->gdtr.base;
520         cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
521         cpu_data->linux_idtr.base = vmcb->idtr.base;
522         cpu_data->linux_idtr.limit = vmcb->idtr.limit;
523
524         cpu_data->linux_cs.selector = vmcb->cs.selector;
525
526         asm volatile("str %0" : "=m" (cpu_data->linux_tss.selector));
527
528         cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
529         cpu_data->linux_fs.base = read_msr(MSR_FS_BASE);
530         cpu_data->linux_gs.base = vmcb->gs.base;
531
532         cpu_data->linux_ds.selector = vmcb->ds.selector;
533         cpu_data->linux_es.selector = vmcb->es.selector;
534
535         asm volatile("mov %%fs,%0" : "=m" (cpu_data->linux_fs.selector));
536         asm volatile("mov %%gs,%0" : "=m" (cpu_data->linux_gs.selector));
537
538         arch_cpu_restore(cpu_data, 0);
539
540         stack--;
541         *stack = linux_ip;
542
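        /*
         * Restore the guest GPRs saved in guest_regs (stored r15 first); the
         * "add $8" skips the unused rsp slot, and the final ret consumes the
         * return address written above, re-entering Linux with rax = 0.
         */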
543         asm volatile (
544                 "mov %%rbx,%%rsp\n\t"
545                 "pop %%r15\n\t"
546                 "pop %%r14\n\t"
547                 "pop %%r13\n\t"
548                 "pop %%r12\n\t"
549                 "pop %%r11\n\t"
550                 "pop %%r10\n\t"
551                 "pop %%r9\n\t"
552                 "pop %%r8\n\t"
553                 "pop %%rdi\n\t"
554                 "pop %%rsi\n\t"
555                 "pop %%rbp\n\t"
556                 "add $8,%%rsp\n\t"
557                 "pop %%rbx\n\t"
558                 "pop %%rdx\n\t"
559                 "pop %%rcx\n\t"
560                 "mov %%rax,%%rsp\n\t"
561                 "xor %%rax,%%rax\n\t"
562                 "ret"
563                 : : "a" (stack), "b" (&cpu_data->guest_regs));
564         __builtin_unreachable();
565 }
566
567 void vcpu_vendor_reset(unsigned int sipi_vector)
568 {
569         static const struct svm_segment dataseg_reset_state = {
570                 .selector = 0,
571                 .base = 0,
572                 .limit = 0xffff,
573                 .access_rights = 0x0093,
574         };
575         static const struct svm_segment dtr_reset_state = {
576                 .selector = 0,
577                 .base = 0,
578                 .limit = 0xffff,
579                 .access_rights = 0,
580         };
581         struct per_cpu *cpu_data = this_cpu_data();
582         struct vmcb *vmcb = &cpu_data->vmcb;
583         unsigned long val;
584
585         vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
586         vmcb->cr3 = 0;
587         vmcb->cr4 = 0;
588
589         vmcb->rflags = 0x02;
590
591         val = 0;
592         if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
593                 val = 0xfff0;
594                 sipi_vector = 0xf0;
595         }
596         vmcb->rip = val;
597         vmcb->rsp = 0;
598
599         vmcb->cs.selector = sipi_vector << 8;
600         vmcb->cs.base = sipi_vector << 12;
601         vmcb->cs.limit = 0xffff;
602         vmcb->cs.access_rights = 0x009b;
603
604         vmcb->ds = dataseg_reset_state;
605         vmcb->es = dataseg_reset_state;
606         vmcb->fs = dataseg_reset_state;
607         vmcb->gs = dataseg_reset_state;
608         vmcb->ss = dataseg_reset_state;
609
610         vmcb->tr.selector = 0;
611         vmcb->tr.base = 0;
612         vmcb->tr.limit = 0xffff;
613         vmcb->tr.access_rights = 0x008b;
614
615         vmcb->ldtr.selector = 0;
616         vmcb->ldtr.base = 0;
617         vmcb->ldtr.limit = 0xffff;
618         vmcb->ldtr.access_rights = 0x0082;
619
620         vmcb->gdtr = dtr_reset_state;
621         vmcb->idtr = dtr_reset_state;
622
623         vmcb->efer = EFER_SVME;
624
625         /* These MSRs are undefined on reset */
626         vmcb->star = 0;
627         vmcb->lstar = 0;
628         vmcb->cstar = 0;
629         vmcb->sfmask = 0;
630         vmcb->sysenter_cs = 0;
631         vmcb->sysenter_eip = 0;
632         vmcb->sysenter_esp = 0;
633         vmcb->kerngsbase = 0;
634
635         vmcb->dr7 = 0x00000400;
636
637         vmcb->eventinj = 0;
638
639         /* Almost all of the guest state changed */
640         vmcb->clean_bits = 0;
641
642         svm_set_cell_config(cpu_data->cell, vmcb);
643
644         asm volatile(
645                 "vmload %%rax"
646                 : : "a" (paging_hvirt2phys(vmcb)) : "memory");
647         /* vmload overwrites GS_BASE - restore the host state */
648         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
649 }
650
651 void vcpu_skip_emulated_instruction(unsigned int inst_len)
652 {
653         this_cpu_data()->vmcb.rip += inst_len;
654 }
655
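/*
 * Since we intercept the CR0 write that enables paging, the hardware does
 * not set EFER.LMA for the guest; emulate that side effect here when LME
 * is set and paging is being turned on.
 */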
656 static void update_efer(struct vmcb *vmcb)
657 {
658         unsigned long efer = vmcb->efer;
659
660         if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
661                 return;
662
663         efer |= EFER_LMA;
664
665         /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
666         if ((vmcb->efer ^ efer) & EFER_LMA)
667                 vcpu_tlb_flush();
668
669         vmcb->efer = efer;
670         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
671 }
672
673 bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
674 {
675         struct vmcb *vmcb = &this_cpu_data()->vmcb;
676
677         if (vmcb->efer & EFER_LMA) {
678                 pg_structs->root_paging = x86_64_paging;
679                 pg_structs->root_table_gphys = vmcb->cr3 & BIT_MASK(51, 12);
680         } else if ((vmcb->cr0 & X86_CR0_PG) &&
681                    !(vmcb->cr4 & X86_CR4_PAE)) {
682                 pg_structs->root_paging = i386_paging;
683                 pg_structs->root_table_gphys = vmcb->cr3 & BIT_MASK(31, 12);
684         } else if (!(vmcb->cr0 & X86_CR0_PG)) {
685                 /*
686                  * Can be in non-paged protected mode as well, but
687  * the translation mechanism will stay the same anyway.
688                  */
689                 pg_structs->root_paging = realmode_paging;
690                 /*
691                  * This will make paging_get_guest_pages map the page
692                  * that also contains the bootstrap code and, thus, is
693                  * always present in a cell.
694                  */
695                 pg_structs->root_table_gphys = 0xff000;
696         } else {
697                 printk("FATAL: Unsupported paging mode\n");
698                 return false;
699         }
700         return true;
701 }
702
703 void vcpu_vendor_set_guest_pat(unsigned long val)
704 {
705         struct vmcb *vmcb = &this_cpu_data()->vmcb;
706
707         vmcb->g_pat = val;
708         vmcb->clean_bits &= ~CLEAN_BITS_NP;
709 }
710
711 struct parse_context {
712         unsigned int remaining;
713         unsigned int size;
714         unsigned long cs_base;
715         const u8 *inst;
716 };
717
718 static bool ctx_advance(struct parse_context *ctx,
719                         unsigned long *pc,
720                         struct guest_paging_structures *pg_structs)
721 {
722         if (!ctx->size) {
723                 ctx->size = ctx->remaining;
724                 ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
725                                           &ctx->size);
726                 if (!ctx->inst)
727                         return false;
728                 ctx->remaining -= ctx->size;
729                 *pc += ctx->size;
730         }
731         return true;
732 }
733
734 static bool svm_parse_mov_to_cr(struct vmcb *vmcb, unsigned long pc,
735                                 unsigned char reg, unsigned long *gpr)
736 {
737         struct guest_paging_structures pg_structs;
738         struct parse_context ctx = {};
739         /* No prefixes are supported yet */
740         u8 opcodes[] = {0x0f, 0x22}, modrm;
741         int n;
742
743         ctx.remaining = ARRAY_SIZE(opcodes);
744         if (!vcpu_get_guest_paging_structs(&pg_structs))
745                 return false;
746         ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;
747
748         if (!ctx_advance(&ctx, &pc, &pg_structs))
749                 return false;
750
751         for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++)
752                 if (*(ctx.inst) != opcodes[n] ||
753                     !ctx_advance(&ctx, &pc, &pg_structs))
754                         return false;
755
756         if (!ctx_advance(&ctx, &pc, &pg_structs))
757                 return false;
758
759         modrm = *(ctx.inst);
760
761         if (((modrm & 0x38) >> 3) != reg)
762                 return false;
763
764         if (gpr)
765                 *gpr = (modrm & 0x7);
766
767         return true;
768 }
769
770 /*
771  * XXX: The only visible reason to have this function (vmx.c consistency
772  * aside) is to prevent cells from setting invalid CD+NW combinations that
773  * result in no more than VMEXIT_INVALID. Maybe we can get along without it
774  * altogether?
775  */
776 static bool svm_handle_cr(struct per_cpu *cpu_data)
777 {
778         struct vmcb *vmcb = &cpu_data->vmcb;
779         /* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
780         unsigned long reg = -1, val, bits;
781
782         if (has_assists) {
783                 if (!(vmcb->exitinfo1 & (1UL << 63))) {
784                         panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
785                         return false;
786                 }
787                 reg = vmcb->exitinfo1 & 0x07;
788         } else {
789                 if (!svm_parse_mov_to_cr(vmcb, vmcb->rip, 0, &reg)) {
790                         panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
791                         return false;
792                 }
793         }
794
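        /*
         * RSP is kept in the VMCB rather than in guest_regs; all other GPRs
         * are saved in reverse order (r15 first), hence the 15 - reg index.
         */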
795         if (reg == 4)
796                 val = vmcb->rsp;
797         else
798                 val = cpu_data->guest_regs.by_index[15 - reg];
799
800         vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
801         /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
802         bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
803         if ((val ^ vmcb->cr0) & bits)
804                 vcpu_tlb_flush();
805         /* TODO: better check for #GP reasons */
806         vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
807         if (val & X86_CR0_PG)
808                 update_efer(vmcb);
809         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
810
811         return true;
812 }
813
814 static bool svm_handle_msr_write(struct per_cpu *cpu_data)
815 {
816         struct vmcb *vmcb = &cpu_data->vmcb;
817         unsigned long efer;
818
819         if (cpu_data->guest_regs.rcx == MSR_EFER) {
820                 /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
821                 efer = get_wrmsr_value(&cpu_data->guest_regs) | EFER_SVME;
822                 /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
823                 if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
824                         vcpu_tlb_flush();
825                 vmcb->efer = efer;
826                 vmcb->clean_bits &= ~CLEAN_BITS_CRX;
827                 vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
828                 return true;
829         }
830
831         return vcpu_handle_msr_write();
832 }
833
834 /*
835  * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
836  * be treated separately in svm_handle_avic_access().
837  */
838 static bool svm_handle_apic_access(struct vmcb *vmcb)
839 {
840         struct guest_paging_structures pg_structs;
841         unsigned int inst_len, offset;
842         bool is_write;
843
844         /* The caller is responsible for sanity checks */
845         is_write = !!(vmcb->exitinfo1 & 0x2);
846         offset = vmcb->exitinfo2 - XAPIC_BASE;
847
848         if (offset & 0x00f)
849                 goto out_err;
850
851         if (!vcpu_get_guest_paging_structs(&pg_structs))
852                 goto out_err;
853
854         inst_len = apic_mmio_access(vmcb->rip, &pg_structs, offset >> 4,
855                                     is_write);
856         if (!inst_len)
857                 goto out_err;
858
859         vcpu_skip_emulated_instruction(inst_len);
860         return true;
861
862 out_err:
863         panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
864                      offset, is_write);
865         return false;
866 }
867
868 static void dump_guest_regs(union registers *guest_regs, struct vmcb *vmcb)
869 {
870         panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
871                      vmcb->rsp, vmcb->rflags);
872         panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
873                      guest_regs->rbx, guest_regs->rcx);
874         panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
875                      guest_regs->rsi, guest_regs->rdi);
876         panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
877                      vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
878                      !!(vmcb->efer & EFER_LMA));
879         panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
880                      vmcb->cr3, vmcb->cr4);
881         panic_printk("EFER: %p\n", vmcb->efer);
882 }
883
884 void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *io)
885 {
886         struct vmcb *vmcb = &this_cpu_data()->vmcb;
887         u64 exitinfo = vmcb->exitinfo1;
888
889         /* parse exit info for I/O instructions (see APM, 15.10.2) */
890         io->port = (exitinfo >> 16) & 0xFFFF;
891         io->size = (exitinfo >> 4) & 0x7;
892         io->in = !!(exitinfo & 0x1);
893         io->inst_len = vmcb->exitinfo2 - vmcb->rip;
894         io->rep_or_str = !!(exitinfo & 0x0c);
895 }
896
897 void vcpu_vendor_get_mmio_intercept(struct vcpu_mmio_intercept *mmio)
898 {
899         struct vmcb *vmcb = &this_cpu_data()->vmcb;
900
901         mmio->phys_addr = vmcb->exitinfo2;
902         mmio->is_write = !!(vmcb->exitinfo1 & 0x2);
903 }
904
905 void vcpu_handle_exit(struct per_cpu *cpu_data)
906 {
907         struct vmcb *vmcb = &cpu_data->vmcb;
908         bool res = false;
909
910         vmcb->gs.base = read_msr(MSR_GS_BASE);
911
912         /* Restore GS value expected by per_cpu data accessors */
913         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
914
915         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
916         /*
917          * All guest state is marked unmodified; individual handlers must clear
918          * the bits as needed.
919          */
920         vmcb->clean_bits = 0xffffffff;
921
922         switch (vmcb->exitcode) {
923         case VMEXIT_INVALID:
924                 panic_printk("FATAL: VM-Entry failure, error %d\n",
925                              vmcb->exitcode);
926                 break;
927         case VMEXIT_NMI:
928                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
929                 /* Temporarily enable GIF to consume pending NMI */
930                 asm volatile("stgi; clgi" : : : "memory");
931                 x86_check_events();
932                 goto vmentry;
933         case VMEXIT_VMMCALL:
934                 vcpu_handle_hypercall();
935                 goto vmentry;
936         case VMEXIT_CR0_SEL_WRITE:
937                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
938                 if (svm_handle_cr(cpu_data))
939                         goto vmentry;
940                 break;
941         case VMEXIT_CPUID:
942                 vcpu_handle_cpuid();
943                 goto vmentry;
944         case VMEXIT_MSR:
945                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
946                 if (!vmcb->exitinfo1)
947                         res = vcpu_handle_msr_read();
948                 else
949                         res = svm_handle_msr_write(cpu_data);
950                 if (res)
951                         goto vmentry;
952                 break;
953         case VMEXIT_NPF:
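                /*
                 * exitinfo1 holds a page-fault style error code; 0x7 means a
                 * write (bit 1) to a present (bit 0), user-accessible (bit 2)
                 * page, i.e. a trapped write to the read-only xAPIC mapping.
                 */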
954                 if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
955                      vmcb->exitinfo2 >= XAPIC_BASE &&
956                      vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
957                         /* APIC access in non-AVIC mode */
958                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
959                         if (svm_handle_apic_access(vmcb))
960                                 goto vmentry;
961                 } else {
962                         /* General MMIO (IOAPIC, PCI etc) */
963                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
964                         if (vcpu_handle_mmio_access())
965                                 goto vmentry;
966                 }
967                 break;
968         case VMEXIT_XSETBV:
969                 if (vcpu_handle_xsetbv())
970                         goto vmentry;
971                 break;
972         case VMEXIT_IOIO:
973                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
974                 if (vcpu_handle_io_access())
975                         goto vmentry;
976                 break;
977         case VMEXIT_EXCEPTION_DB:
978         case VMEXIT_EXCEPTION_AC:
979                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_EXCEPTION]++;
980                 /* Reinject exception, including error code if needed. */
981                 vmcb->eventinj = (vmcb->exitcode - VMEXIT_EXCEPTION_DE) |
982                         SVM_EVENTINJ_EXCEPTION | SVM_EVENTINJ_VALID;
983                 if (vmcb->exitcode == VMEXIT_EXCEPTION_AC) {
984                         vmcb->eventinj |= SVM_EVENTINJ_ERR_VALID;
985                         vmcb->eventinj_err = vmcb->exitinfo1;
986                 }
987                 x86_check_events();
988                 goto vmentry;
989         /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
990         default:
991                 panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
992                              "exitinfo1 %p exitinfo2 %p\n",
993                              vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
994         }
995         dump_guest_regs(&cpu_data->guest_regs, vmcb);
996         panic_park();
997
998 vmentry:
999         write_msr(MSR_GS_BASE, vmcb->gs.base);
1000 }
1001
1002 void vcpu_park(void)
1003 {
1004         vcpu_vendor_reset(APIC_BSP_PSEUDO_SIPI);
1005         /* No need to clear VMCB Clean bit: vcpu_vendor_reset() already does
1006          * this. */
1007         this_cpu_data()->vmcb.n_cr3 = paging_hvirt2phys(parked_mode_npt);
1008
1009         vcpu_tlb_flush();
1010 }
1011
1012 void vcpu_nmi_handler(void)
1013 {
1014 }
1015
1016 void vcpu_tlb_flush(void)
1017 {
1018         struct vmcb *vmcb = &this_cpu_data()->vmcb;
1019
1020         if (has_flush_by_asid)
1021                 vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
1022         else
1023                 vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
1024 }
1025
1026 const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
1027                               unsigned long pc, unsigned int *size)
1028 {
1029         struct vmcb *vmcb = &this_cpu_data()->vmcb;
1030         unsigned long start;
1031
1032         if (has_assists) {
1033                 if (!*size)
1034                         return NULL;
1035                 start = vmcb->rip - pc;
1036                 if (start < vmcb->bytes_fetched) {
1037                         *size = vmcb->bytes_fetched - start;
1038                         return &vmcb->guest_bytes[start];
1039                 } else {
1040                         return NULL;
1041                 }
1042         } else {
1043                 return vcpu_map_inst(pg_structs, pc, size);
1044         }
1045 }
1046
1047 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
1048                                     struct vcpu_io_bitmap *iobm)
1049 {
1050         iobm->data = cell->arch.svm.iopm;
1051         iobm->size = IOPM_PAGES * PAGE_SIZE;
1052 }
1053
1054 void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
1055 {
1056         struct vmcb *vmcb = &this_cpu_data()->vmcb;
1057
1058         x_state->efer = vmcb->efer;
1059         x_state->rflags = vmcb->rflags;
1060         x_state->cs = vmcb->cs.selector;
1061         x_state->rip = vmcb->rip;
1062 }
1063
1064 /* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
1065 void enable_irq(void)
1066 {
1067         asm volatile("stgi; sti" : : : "memory");
1068 }
1069
1070 /* Jailhouse runs with GIF cleared, so we need to restore this state */
1071 void disable_irq(void)
1072 {
1073         asm volatile("cli; clgi" : : : "memory");
1074 }