1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  * Copyright (c) Valentine Sinitsyn, 2014
6  *
7  * Authors:
8  *  Jan Kiszka <jan.kiszka@siemens.com>
9  *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
10  *
11  * Based on vmx.c written by Jan Kiszka.
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  */
16
17 #include <jailhouse/entry.h>
18 #include <jailhouse/cell-config.h>
19 #include <jailhouse/control.h>
20 #include <jailhouse/paging.h>
21 #include <jailhouse/printk.h>
22 #include <jailhouse/processor.h>
23 #include <jailhouse/string.h>
24 #include <jailhouse/utils.h>
25 #include <asm/apic.h>
26 #include <asm/cell.h>
27 #include <asm/control.h>
28 #include <asm/iommu.h>
29 #include <asm/paging.h>
30 #include <asm/percpu.h>
31 #include <asm/processor.h>
32 #include <asm/svm.h>
33 #include <asm/vcpu.h>
34
35 /*
36  * The NW bit is ignored by all modern processors; however, some
37  * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
38  * Sect. 15.5). To handle this, we always keep the NW bit off.
39  */
40 #define SVM_CR0_ALLOWED_BITS    (~X86_CR0_NW)
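/*
 * Illustration: masking guest CR0 values with SVM_CR0_ALLOWED_BITS forces
 * NW to 0, so e.g. a guest MOV to CR0 with CD=0/NW=1 (a combination VMRUN
 * would reject) is stored as CD=0/NW=0; see svm_handle_cr() below.
 */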
41
42 #define MTRR_DEFTYPE            0x2ff
43
44 #define PAT_RESET_VALUE         0x0007040600070406UL
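/*
 * PAT_RESET_VALUE is the architectural power-on default: bytes (low to
 * high) select WB (06), WT (04), UC- (07), UC (00) for PA0..PA3, repeated
 * for PA4..PA7.
 */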
45
46 static bool has_avic, has_assists, has_flush_by_asid;
47
48 static const struct segment invalid_seg;
49
50 static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
51
52 /* bit cleared: direct access allowed */
53 /* TODO: convert to whitelist */
54 static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
55         [ SVM_MSRPM_0000 ] = {
56                 [      0/4 ...  0x017/4 ] = 0,
57                 [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
58                 [  0x01c/4 ...  0x2fb/4 ] = 0,
59                 [  0x2fc/4 ...  0x2ff/4 ] = 0x80, /* 0x2ff (w) */
60                 [  0x300/4 ...  0x7ff/4 ] = 0,
61                 /* x2APIC MSRs - emulated if not present */
62                 [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
63                 [  0x804/4 ...  0x807/4 ] = 0,
64                 [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
65                 [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
66                 [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
67                 [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
68                 [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
69                 [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
70                 [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
71                 [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
72                 [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
73                 [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
74                 [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
75                 [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
76                 [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
77                 [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
78                 [  0x840/4 ... 0x1fff/4 ] = 0,
79         },
80         [ SVM_MSRPM_C000 ] = {
81                 [      0/4 ...  0x07f/4 ] = 0,
82                 [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
83                 [  0x084/4 ... 0x1fff/4 ] = 0
84         },
85         [ SVM_MSRPM_C001 ] = {
86                 [      0/4 ... 0x1fff/4 ] = 0,
87         },
88         [ SVM_MSRPM_RESV ] = {
89                 [      0/4 ... 0x1fff/4 ] = 0,
90         }
91 };
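/*
 * Worked example for the table above: each MSR takes two permission bits
 * (bit 0 of the pair = read intercept, bit 1 = write intercept), so one
 * byte covers four consecutive MSRs. For MSR 0x01b, the byte index is
 * 0x01b/4 = 6 and the MSR occupies bit pair 6..7; setting only bit 7
 * (0x80) thus intercepts writes to 0x01b while leaving its reads and the
 * neighbouring MSRs 0x018-0x01a unfiltered.
 */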
92
93 /* This page is mapped so the code begins at 0x000ffff0 */
94 static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
95         [0xff0] = 0xfa, /* 1: cli */
96         [0xff1] = 0xf4, /*    hlt */
97         [0xff2] = 0xeb,
98         [0xff3] = 0xfc  /*    jmp 1b */
99 };
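/*
 * The bytes above decode to "1: cli; hlt; jmp 1b": 0xeb 0xfc is a short
 * jump with displacement -4, i.e. back to the cli. The page is mapped at
 * guest-physical 0x000ff000 in vcpu_vendor_init(), so the loop starts at
 * 0x000ffff0, which is where vcpu_park() resets a stopped CPU to.
 */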
100
101 static void *parked_mode_npt;
102
103 static void *avic_page;
104
105 static int svm_check_features(void)
106 {
107         /* SVM is available */
108         if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
109                 return trace_error(-ENODEV);
110
111         /* Nested paging */
112         if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
113                 return trace_error(-EIO);
114
115         /* Decode assists */
116         if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
117                 has_assists = true;
118
119         /* AVIC support */
120         if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
121                 has_avic = true;
122
123         /* TLB Flush by ASID support */
124         if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
125                 has_flush_by_asid = true;
126
127         return 0;
128 }
129
130 static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
131                                      const struct desc_table_reg *dtr)
132 {
133         struct svm_segment tmp = { 0 };
134
135         if (dtr) {
136                 tmp.base = dtr->base;
137                 tmp.limit = dtr->limit & 0xffff;
138         }
139
140         *svm_segment = tmp;
141 }
142
143 /* TODO: struct segment needs to be x86 generic, not the VMX-specific one used here */
144 static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
145                                          const struct segment *segment)
146 {
147         u32 ar;
148
149         svm_segment->selector = segment->selector;
150
151         if (segment->access_rights == 0x10000) {
152                 svm_segment->access_rights = 0;
153         } else {
154                 ar = segment->access_rights;
155                 svm_segment->access_rights =
156                         ((ar & 0xf000) >> 4) | (ar & 0x00ff);
157         }
158
159         svm_segment->limit = segment->limit;
160         svm_segment->base = segment->base;
161 }
162
163 static bool svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
164 {
165         /* No real need for this function; used for consistency with vmx.c */
166         vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
167         vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);
168
169         return true;
170 }
171
172 static int vmcb_setup(struct per_cpu *cpu_data)
173 {
174         struct vmcb *vmcb = &cpu_data->vmcb;
175
176         memset(vmcb, 0, sizeof(struct vmcb));
177
178         vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
179         vmcb->cr3 = cpu_data->linux_cr3;
180         vmcb->cr4 = cpu_data->linux_cr4;
181
182         set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
183         set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
184         set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
185         set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
186         set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
187         set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
188         set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
189
190         set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
191         set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
192         set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
193
194         vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
195
196         vmcb->rflags = 0x02;
197         /* Indicate success to the caller of arch_entry */
198         vmcb->rax = 0;
199         vmcb->rsp = cpu_data->linux_sp +
200                 (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
201         vmcb->rip = cpu_data->linux_ip;
202
203         vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
204         vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
205         vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
206         vmcb->star = read_msr(MSR_STAR);
207         vmcb->lstar = read_msr(MSR_LSTAR);
208         vmcb->cstar = read_msr(MSR_CSTAR);
209         vmcb->sfmask = read_msr(MSR_SFMASK);
210         vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
211
212         vmcb->dr6 = 0x00000ff0;
213         vmcb->dr7 = 0x00000400;
214
215         /* Make the hypervisor visible */
216         vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
217
218         /* Linux uses custom PAT setting */
219         vmcb->g_pat = read_msr(MSR_IA32_PAT);
220
221         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
222         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
223         /* TODO: Do we need this for SVM? */
224         /* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
225         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
226         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
227         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
228
229         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
230         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
231
232         vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
233
234         vmcb->np_enable = 1;
235         /* No more than one guest owns the CPU */
236         vmcb->guest_asid = 1;
237
238         /* TODO: Setup AVIC */
239
240         /* Explicitly mark all of the state as new */
241         vmcb->clean_bits = 0;
242
243         return svm_set_cell_config(cpu_data->cell, vmcb);
244 }
245
246 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
247                                      unsigned long gphys,
248                                      unsigned long flags)
249 {
250         return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
251                         gphys, flags);
252 }
253
254 static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
255 {
256         /* See APMv2, Section 15.25.5 */
257         *pte = (next_pt & 0x000ffffffffff000UL) |
258                 (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
259 }
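/*
 * The mask above keeps bits 51:12 (the next-level table address); the U/S
 * flag is set because nested page table walks treat guest accesses as
 * user-mode accesses, as noted in the APMv2 section referenced above.
 */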
260
261 int vcpu_vendor_init(void)
262 {
263         struct paging_structures parking_pt;
264         unsigned long vm_cr;
265         int err, n;
266
267         err = svm_check_features();
268         if (err)
269                 return err;
270
271         vm_cr = read_msr(MSR_VM_CR);
272         if (vm_cr & VM_CR_SVMDIS)
273                 /* SVM disabled in BIOS */
274                 return trace_error(-EPERM);
275
276         /* Nested paging is the same as the native one */
277         memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
278         for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
279                 npt_paging[n].set_next_pt = npt_set_next_pt;
280
281         /* Map guest parking code (shared between cells and CPUs) */
282         parking_pt.root_paging = npt_paging;
283         parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
284         if (!parked_mode_npt)
285                 return -ENOMEM;
286         err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
287                             PAGE_SIZE, 0x000ff000,
288                             PAGE_READONLY_FLAGS | PAGE_FLAG_US,
289                             PAGING_NON_COHERENT);
290         if (err)
291                 return err;
292
293         /* This is always false for AMD now (except in nested SVM);
294            see Sect. 16.3.1 in APMv2 */
295         if (using_x2apic) {
296                 /* allow direct x2APIC access except for ICR writes */
297                 memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
298                                 (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
299                 msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
300         } else {
301                 if (has_avic) {
302                         avic_page = page_alloc(&remap_pool, 1);
303                         if (!avic_page)
304                                 return trace_error(-ENOMEM);
305                 }
306         }
307
308         return vcpu_cell_init(&root_cell);
309 }
310
311 int vcpu_vendor_cell_init(struct cell *cell)
312 {
313         u64 flags;
314         int err;
315
316         /* allocate iopm (two 4-K pages + 3 trailing bits, hence 3 pages) */
317         cell->svm.iopm = page_alloc(&mem_pool, 3);
318         if (!cell->svm.iopm)
319                 return -ENOMEM;
320
321         /* build root NPT of cell */
322         cell->svm.npt_structs.root_paging = npt_paging;
323         cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
324         if (!cell->svm.npt_structs.root_table)
325                 return -ENOMEM;
326
327         if (!has_avic) {
328                 /*
329                  * Map xAPIC page as is; reads are passed through, writes are trapped.
330                  */
331                 flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
332                 err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
333                                     PAGE_SIZE, XAPIC_BASE,
334                                     flags,
335                                     PAGING_NON_COHERENT);
336         } else {
337                 flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
338                 err = paging_create(&cell->svm.npt_structs,
339                                     paging_hvirt2phys(avic_page),
340                                     PAGE_SIZE, XAPIC_BASE,
341                                     flags,
342                                     PAGING_NON_COHERENT);
343         }
344
345         return err;
346 }
347
348 int vcpu_map_memory_region(struct cell *cell,
349                            const struct jailhouse_memory *mem)
350 {
351         u64 phys_start = mem->phys_start;
352         u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
353
354         if (mem->flags & JAILHOUSE_MEM_READ)
355                 flags |= PAGE_FLAG_PRESENT;
356         if (mem->flags & JAILHOUSE_MEM_WRITE)
357                 flags |= PAGE_FLAG_RW;
358         if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
359                 flags |= PAGE_FLAG_NOEXECUTE;
360         if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
361                 phys_start = paging_hvirt2phys(&cell->comm_page);
362
363         return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
364                              mem->virt_start, flags, PAGING_NON_COHERENT);
365 }
366
367 int vcpu_unmap_memory_region(struct cell *cell,
368                              const struct jailhouse_memory *mem)
369 {
370         return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
371                               mem->size, PAGING_NON_COHERENT);
372 }
373
374 void vcpu_vendor_cell_exit(struct cell *cell)
375 {
376         paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
377                        PAGING_NON_COHERENT);
378         page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
379 }
380
381 int vcpu_init(struct per_cpu *cpu_data)
382 {
383         unsigned long efer;
384         int err;
385
386         err = svm_check_features();
387         if (err)
388                 return err;
389
390         efer = read_msr(MSR_EFER);
391         if (efer & EFER_SVME)
392                 return trace_error(-EBUSY);
393
394         efer |= EFER_SVME;
395         write_msr(MSR_EFER, efer);
396
397         cpu_data->svm_state = SVMON;
398
399         if (!vmcb_setup(cpu_data))
400                 return trace_error(-EIO);
401
402         /*
403          * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
404          * set the values of reserved bits to the values found during the
405          * previous CR0 read."
406          * But we want to avoid surprises with new features unknown to us but
407  * set by Linux. So check if any assumed reserved bit was set and bail
408          * out if so.
409          * Note that the APM defines all reserved CR4 bits as must-be-zero.
410          */
411         if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
412                 return -EIO;
413
414         /* bring CR0 and CR4 into well-defined states */
415         write_cr0(X86_CR0_HOST_STATE);
416         write_cr4(X86_CR4_HOST_STATE);
417
418         write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
419
420         return 0;
421 }
422
423 void vcpu_exit(struct per_cpu *cpu_data)
424 {
425         unsigned long efer;
426
427         if (cpu_data->svm_state == SVMOFF)
428                 return;
429
430         cpu_data->svm_state = SVMOFF;
431
432         /* We are leaving - set the GIF */
433         asm volatile ("stgi" : : : "memory");
434
435         efer = read_msr(MSR_EFER);
436         efer &= ~EFER_SVME;
437         write_msr(MSR_EFER, efer);
438
439         write_msr(MSR_VM_HSAVE_PA, 0);
440 }
441
442 void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
443 {
444         unsigned long vmcb_pa, host_stack;
445
446         vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
447         host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);
448
449         /*
450          * XXX: Jailhouse doesn't use PAT, so it is explicitly set to the
451          * reset value. However, this value is later combined with vmcb->g_pat
452          * (as per APMv2, Sect. 15.25.8) which may lead to subtle bugs as the
453          * actual memory type might slightly differ from what Linux expects.
454          */
455         write_msr(MSR_IA32_PAT, PAT_RESET_VALUE);
456
457         /* We enter Linux at the point arch_entry would return to as well.
458          * rax is cleared to signal success to the caller. */
459         asm volatile(
460                 "clgi\n\t"
461                 "mov (%%rdi),%%r15\n\t"
462                 "mov 0x8(%%rdi),%%r14\n\t"
463                 "mov 0x10(%%rdi),%%r13\n\t"
464                 "mov 0x18(%%rdi),%%r12\n\t"
465                 "mov 0x20(%%rdi),%%rbx\n\t"
466                 "mov 0x28(%%rdi),%%rbp\n\t"
467                 "mov %0, %%rax\n\t"
468                 "vmload %%rax\n\t"
469                 "vmrun %%rax\n\t"
470                 "vmsave %%rax\n\t"
471                 /* Restore hypervisor stack */
472                 "mov %2, %%rsp\n\t"
473                 "jmp svm_vmexit"
474                 : /* no output */
475                 : "m" (vmcb_pa), "D" (cpu_data->linux_reg), "m" (host_stack)
476                 : "memory", "r15", "r14", "r13", "r12",
477                   "rbx", "rbp", "rax", "cc");
478         __builtin_unreachable();
479 }
480
481 void __attribute__((noreturn))
482 vcpu_deactivate_vmm(struct registers *guest_regs)
483 {
484         struct per_cpu *cpu_data = this_cpu_data();
485         struct vmcb *vmcb = &cpu_data->vmcb;
486         unsigned long *stack = (unsigned long *)vmcb->rsp;
487         unsigned long linux_ip = vmcb->rip;
488
489         /*
490          * Restore the MSRs.
491          *
492          * XXX: One could argue this is better to be done in
493          * arch_cpu_restore(), however, it would require changes
494          * to cpu_data to store STAR and friends.
495          */
496         write_msr(MSR_STAR, vmcb->star);
497         write_msr(MSR_LSTAR, vmcb->lstar);
498         write_msr(MSR_CSTAR, vmcb->cstar);
499         write_msr(MSR_SFMASK, vmcb->sfmask);
500         write_msr(MSR_KERNGS_BASE, vmcb->kerngsbase);
501         write_msr(MSR_IA32_PAT, vmcb->g_pat);
502
503         cpu_data->linux_cr0 = vmcb->cr0;
504         cpu_data->linux_cr3 = vmcb->cr3;
505
506         cpu_data->linux_gdtr.base = vmcb->gdtr.base;
507         cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
508         cpu_data->linux_idtr.base = vmcb->idtr.base;
509         cpu_data->linux_idtr.limit = vmcb->idtr.limit;
510
511         cpu_data->linux_cs.selector = vmcb->cs.selector;
512
513         cpu_data->linux_tss.selector = vmcb->tr.selector;
514
515         cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
516         cpu_data->linux_fs.base = vmcb->fs.base;
517         cpu_data->linux_gs.base = vmcb->gs.base;
518
519         cpu_data->linux_sysenter_cs = vmcb->sysenter_cs;
520         cpu_data->linux_sysenter_eip = vmcb->sysenter_eip;
521         cpu_data->linux_sysenter_esp = vmcb->sysenter_esp;
522
523         cpu_data->linux_ds.selector = vmcb->ds.selector;
524         cpu_data->linux_es.selector = vmcb->es.selector;
525         cpu_data->linux_fs.selector = vmcb->fs.selector;
526         cpu_data->linux_gs.selector = vmcb->gs.selector;
527
528         arch_cpu_restore(cpu_data, 0);
529
530         stack--;
531         *stack = linux_ip;
532
533         asm volatile (
534                 "mov %%rbx,%%rsp\n\t"
535                 "pop %%r15\n\t"
536                 "pop %%r14\n\t"
537                 "pop %%r13\n\t"
538                 "pop %%r12\n\t"
539                 "pop %%r11\n\t"
540                 "pop %%r10\n\t"
541                 "pop %%r9\n\t"
542                 "pop %%r8\n\t"
543                 "pop %%rdi\n\t"
544                 "pop %%rsi\n\t"
545                 "pop %%rbp\n\t"
546                 "add $8,%%rsp\n\t"
547                 "pop %%rbx\n\t"
548                 "pop %%rdx\n\t"
549                 "pop %%rcx\n\t"
550                 "mov %%rax,%%rsp\n\t"
551                 "xor %%rax,%%rax\n\t"
552                 "ret"
553                 : : "a" (stack), "b" (guest_regs));
554         __builtin_unreachable();
555 }
556
557 static void svm_vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
558 {
559         struct vmcb *vmcb = &cpu_data->vmcb;
560         unsigned long val;
561         bool ok = true;
562
563         vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
564         vmcb->cr3 = 0;
565         vmcb->cr4 = 0;
566
567         vmcb->rflags = 0x02;
568
569         val = 0;
570         if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
571                 val = 0xfff0;
572                 sipi_vector = 0xf0;
573         }
574         vmcb->rip = val;
575         vmcb->rsp = 0;
576
577         vmcb->cs.selector = sipi_vector << 8;
578         vmcb->cs.base = sipi_vector << 12;
579         vmcb->cs.limit = 0xffff;
580         vmcb->cs.access_rights = 0x009b;
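        /*
         * Worked example: with the BSP pseudo-SIPI above, CS.base becomes
         * 0xf0 << 12 = 0xf0000 and RIP is 0xfff0, so the first fetch hits
         * guest-physical 0xffff0 (the parking loop once vcpu_park() has
         * switched the NPT). A real SIPI with vector V starts at V << 12.
         */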
581
582         vmcb->ds.selector = 0;
583         vmcb->ds.base = 0;
584         vmcb->ds.limit = 0xffff;
585         vmcb->ds.access_rights = 0x0093;
586
587         vmcb->es.selector = 0;
588         vmcb->es.base = 0;
589         vmcb->es.limit = 0xffff;
590         vmcb->es.access_rights = 0x0093;
591
592         vmcb->fs.selector = 0;
593         vmcb->fs.base = 0;
594         vmcb->fs.limit = 0xffff;
595         vmcb->fs.access_rights = 0x0093;
596
597         vmcb->gs.selector = 0;
598         vmcb->gs.base = 0;
599         vmcb->gs.limit = 0xffff;
600         vmcb->gs.access_rights = 0x0093;
601
602         vmcb->ss.selector = 0;
603         vmcb->ss.base = 0;
604         vmcb->ss.limit = 0xffff;
605         vmcb->ss.access_rights = 0x0093;
606
607         vmcb->tr.selector = 0;
608         vmcb->tr.base = 0;
609         vmcb->tr.limit = 0xffff;
610         vmcb->tr.access_rights = 0x008b;
611
612         vmcb->ldtr.selector = 0;
613         vmcb->ldtr.base = 0;
614         vmcb->ldtr.limit = 0xffff;
615         vmcb->ldtr.access_rights = 0x0082;
616
617         vmcb->gdtr.selector = 0;
618         vmcb->gdtr.base = 0;
619         vmcb->gdtr.limit = 0xffff;
620         vmcb->gdtr.access_rights = 0;
621
622         vmcb->idtr.selector = 0;
623         vmcb->idtr.base = 0;
624         vmcb->idtr.limit = 0xffff;
625         vmcb->idtr.access_rights = 0;
626
627         vmcb->efer = EFER_SVME;
628
629         /* These MSRs are undefined on reset */
630         vmcb->star = 0;
631         vmcb->lstar = 0;
632         vmcb->cstar = 0;
633         vmcb->sfmask = 0;
634         vmcb->sysenter_cs = 0;
635         vmcb->sysenter_eip = 0;
636         vmcb->sysenter_esp = 0;
637         vmcb->kerngsbase = 0;
638
639         vmcb->g_pat = PAT_RESET_VALUE;
640
641         vmcb->dr7 = 0x00000400;
642
643         /* Almost all of the guest state changed */
644         vmcb->clean_bits = 0;
645
646         ok &= svm_set_cell_config(cpu_data->cell, vmcb);
647
648         /* This is always false, but to be consistent with vmx.c... */
649         if (!ok) {
650                 panic_printk("FATAL: CPU reset failed\n");
651                 panic_stop();
652         }
653 }
654
655 void vcpu_skip_emulated_instruction(unsigned int inst_len)
656 {
657         struct per_cpu *cpu_data = this_cpu_data();
658         struct vmcb *vmcb = &cpu_data->vmcb;
659         vmcb->rip += inst_len;
660 }
661
662 static void update_efer(struct per_cpu *cpu_data)
663 {
664         struct vmcb *vmcb = &cpu_data->vmcb;
665         unsigned long efer = vmcb->efer;
666
667         if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
668                 return;
669
670         efer |= EFER_LMA;
671
672         /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
673         if ((vmcb->efer ^ efer) & EFER_LMA)
674                 vcpu_tlb_flush();
675
676         vmcb->efer = efer;
677         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
678 }
679
680 bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
681 {
682         struct per_cpu *cpu_data = this_cpu_data();
683         struct vmcb *vmcb = &cpu_data->vmcb;
684
685         if (vmcb->efer & EFER_LMA) {
686                 pg_structs->root_paging = x86_64_paging;
687                 pg_structs->root_table_gphys =
688                         vmcb->cr3 & 0x000ffffffffff000UL;
689         } else if ((vmcb->cr0 & X86_CR0_PG) &&
690                    !(vmcb->cr4 & X86_CR4_PAE)) {
691                 pg_structs->root_paging = i386_paging;
692                 pg_structs->root_table_gphys =
693                         vmcb->cr3 & 0xfffff000UL;
694         } else if (!(vmcb->cr0 & X86_CR0_PG)) {
695                 /*
696                  * Can be in non-paged protected mode as well, but
697                  * the translation mechanism will stay the same anyway.
698                  */
699                 pg_structs->root_paging = realmode_paging;
700                 /*
701                  * This will make paging_get_guest_pages map the page
702                  * that also contains the bootstrap code and, thus, is
703                  * always present in a cell.
704                  */
705                 pg_structs->root_table_gphys = 0xff000;
706         } else {
707                 printk("FATAL: Unsupported paging mode\n");
708                 return false;
709         }
710         return true;
711 }
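/*
 * Note: 32-bit PAE paging (CR0.PG=1, CR4.PAE=1, EFER.LMA=0) is not handled
 * above and ends up in the "Unsupported paging mode" branch.
 */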
712
713 struct parse_context {
714         unsigned int remaining;
715         unsigned int size;
716         unsigned long cs_base;
717         const u8 *inst;
718 };
719
720 static bool ctx_advance(struct parse_context *ctx,
721                         unsigned long *pc,
722                         struct guest_paging_structures *pg_structs)
723 {
724         if (!ctx->size) {
725                 ctx->size = ctx->remaining;
726                 ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
727                                           &ctx->size);
728                 if (!ctx->inst)
729                         return false;
730                 ctx->remaining -= ctx->size;
731                 *pc += ctx->size;
732         }
733         return true;
734 }
735
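/*
 * MOV to CRn is encoded as "0f 22 /r": the ModRM reg field selects the
 * control register, the r/m field (with mod=11) the source GPR. Example:
 * "0f 22 d8" is MOV CR3, RAX (reg=011b, r/m=000b). The parser below only
 * verifies the target CR and reports the GPR number.
 */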
736 static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data,
737                                 unsigned long pc,
738                                 unsigned char reg,
739                                 unsigned long *gpr)
740 {
741         struct guest_paging_structures pg_structs;
742         struct vmcb *vmcb = &cpu_data->vmcb;
743         struct parse_context ctx = {};
744         /* No prefixes are supported yet */
745         u8 opcodes[] = {0x0f, 0x22}, modrm;
746         bool ok = false;
747         int n;
748
749         ctx.remaining = ARRAY_SIZE(opcodes);
750         if (!vcpu_get_guest_paging_structs(&pg_structs))
751                 goto out;
752         ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;
753
754         if (!ctx_advance(&ctx, &pc, &pg_structs))
755                 goto out;
756
757         for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++) {
758                 if (*(ctx.inst) != opcodes[n])
759                         goto out;
760                 if (!ctx_advance(&ctx, &pc, &pg_structs))
761                         goto out;
762         }
763
764         if (!ctx_advance(&ctx, &pc, &pg_structs))
765                 goto out;
766
767         modrm = *(ctx.inst);
768
769         if (((modrm & 0x38) >> 3) != reg)
770                 goto out;
771
772         if (gpr)
773                 *gpr = (modrm & 0x7);
774
775         ok = true;
776 out:
777         return ok;
778 }
779
780 /*
781  * XXX: The only visible reason to have this function (vmx.c consistency
782  * aside) is to prevent cells from setting invalid CD+NW combinations that
783  * result in no more than VMEXIT_INVALID. Maybe we can get along without it
784  * altogether?
785  */
786 static bool svm_handle_cr(struct registers *guest_regs,
787                           struct per_cpu *cpu_data)
788 {
789         struct vmcb *vmcb = &cpu_data->vmcb;
790         /* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
791         unsigned long reg = -1, val, bits;
792         bool ok = true;
793
794         if (has_assists) {
795                 if (!(vmcb->exitinfo1 & (1UL << 63))) {
796                         panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
797                         ok = false;
798                         goto out;
799                 }
800                 reg = vmcb->exitinfo1 & 0x07;
801         } else {
802                 if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, &reg)) {
803                         panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
804                         ok = false;
805                         goto out;
806                 }
807         }
808
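        /*
         * struct registers mirrors the register save order (r15 first, rax
         * last), so GPR number 'reg' (ModRM encoding, 0 = rax .. 15 = r15)
         * is found at index 15 - reg; rsp (reg == 4) is taken from the VMCB.
         */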
809         if (reg == 4)
810                 val = vmcb->rsp;
811         else
812                 val = ((unsigned long *)guest_regs)[15 - reg];
813
814         vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
815         /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
816         bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
817         if ((val ^ vmcb->cr0) & bits)
818                 vcpu_tlb_flush();
819         /* TODO: better check for #GP reasons */
820         vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
821         if (val & X86_CR0_PG)
822                 update_efer(cpu_data);
823         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
824
825 out:
826         return ok;
827 }
828
829 static bool svm_handle_msr_read(struct registers *guest_regs,
830                 struct per_cpu *cpu_data)
831 {
832         if (guest_regs->rcx >= MSR_X2APIC_BASE &&
833             guest_regs->rcx <= MSR_X2APIC_END) {
834                 vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
835                 x2apic_handle_read(guest_regs);
836                 return true;
837         } else {
838                 panic_printk("FATAL: Unhandled MSR read: %x\n",
839                              guest_regs->rcx);
840                 return false;
841         }
842 }
843
844 static bool svm_handle_msr_write(struct registers *guest_regs,
845                 struct per_cpu *cpu_data)
846 {
847         struct vmcb *vmcb = &cpu_data->vmcb;
848         unsigned long efer, val;
849         bool result = true;
850
851         if (guest_regs->rcx >= MSR_X2APIC_BASE &&
852             guest_regs->rcx <= MSR_X2APIC_END) {
853                 result = x2apic_handle_write(guest_regs, cpu_data);
854                 goto out;
855         }
856         if (guest_regs->rcx == MSR_EFER) {
857                 /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
858                 efer = (guest_regs->rax & 0xffffffff) |
859                         (guest_regs->rdx << 32) | EFER_SVME;
860                 /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
861                 if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
862                         vcpu_tlb_flush();
863                 vmcb->efer = efer;
864                 vmcb->clean_bits &= ~CLEAN_BITS_CRX;
865                 goto out;
866         }
867         if (guest_regs->rcx == MTRR_DEFTYPE) {
868                 val = (guest_regs->rax & 0xffffffff) | (guest_regs->rdx << 32);
869                 /*
870                  * Quick (and very incomplete) guest MTRRs emulation.
871                  *
872                  * For Linux, emulating the MTRR Enable bit seems to be enough.
873                  * If it is cleared, we set hPAT to all zeroes, effectively
874                  * making all NPT-mapped memory UC (see APMv2, Sect. 15.25.8).
875                  *
876                  * Otherwise, default PAT value is restored. This can also
877                  * make NPT-mapped memory's type different from what Linux
878                  * expects, however.
879                  */
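                /* Bit 11 of MTRRdefType is the MTRR enable (E) bit, hence
                 * the 0x800 mask below. */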
880                 if (val & 0x800)
881                         write_msr(MSR_IA32_PAT, PAT_RESET_VALUE);
882                 else
883                         write_msr(MSR_IA32_PAT, 0);
884                 goto out;
885         }
886
887         result = false;
888         panic_printk("FATAL: Unhandled MSR write: %x\n",
889                      guest_regs->rcx);
890 out:
891         if (result)
892                 vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
893         return result;
894 }
895
896 /*
897  * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
898  * be treated separately in svm_handle_avic_access().
899  */
900 static bool svm_handle_apic_access(struct registers *guest_regs,
901                                    struct per_cpu *cpu_data)
902 {
903         struct vmcb *vmcb = &cpu_data->vmcb;
904         struct guest_paging_structures pg_structs;
905         unsigned int inst_len, offset;
906         bool is_write;
907
908         /* The caller is responsible for sanity checks */
909         is_write = !!(vmcb->exitinfo1 & 0x2);
910         offset = vmcb->exitinfo2 - XAPIC_BASE;
911
912         if (offset & 0x00f)
913                 goto out_err;
914
915         if (!vcpu_get_guest_paging_structs(&pg_structs))
916                 goto out_err;
917
918         inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
919                                     &pg_structs, offset >> 4, is_write);
920         if (!inst_len)
921                 goto out_err;
922
923         vcpu_skip_emulated_instruction(inst_len);
924         return true;
925
926 out_err:
927         panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
928                      offset, is_write);
929         return false;
930 }
931
932 static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
933 {
934         panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
935                      vmcb->rsp, vmcb->rflags);
936         panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
937                      guest_regs->rbx, guest_regs->rcx);
938         panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
939                      guest_regs->rsi, guest_regs->rdi);
940         panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
941                      vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
942                      !!(vmcb->efer & EFER_LMA));
943         panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
944                      vmcb->cr3, vmcb->cr4);
945         panic_printk("EFER: %p\n", vmcb->efer);
946 }
947
948 static void svm_get_vcpu_pf_intercept(struct per_cpu *cpu_data,
949                                       struct vcpu_pf_intercept *out)
950 {
951         struct vmcb *vmcb = &cpu_data->vmcb;
952
953         out->phys_addr = vmcb->exitinfo2;
954         out->is_write = !!(vmcb->exitinfo1 & 0x2);
955 }
956
957 static void svm_get_vcpu_io_intercept(struct per_cpu *cpu_data,
958                                       struct vcpu_io_intercept *out)
959 {
960         struct vmcb *vmcb = &cpu_data->vmcb;
961         u64 exitinfo = vmcb->exitinfo1;
962
963         /* parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
964         out->port = (exitinfo >> 16) & 0xFFFF;
965         out->size = (exitinfo >> 4) & 0x7;
966         out->in = !!(exitinfo & 0x1);
967         out->inst_len = vmcb->exitinfo2 - vmcb->rip;
968         out->rep_or_str = !!(exitinfo & 0x0c);
969 }
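/*
 * Example decode for the fields above: exitinfo1 = 0x00e10011 means port
 * 0x00e1, bit 0 set (IN), size field (bits 6:4) = 1 byte, and bits 3:2
 * clear (no REP prefix, not a string instruction). exitinfo2 holds the
 * rIP of the next instruction, hence the inst_len calculation.
 */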
970
971 void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
972 {
973         struct vmcb *vmcb = &cpu_data->vmcb;
974         struct vcpu_execution_state x_state;
975         struct vcpu_pf_intercept pf;
976         struct vcpu_io_intercept io;
977         bool res = false;
978         int sipi_vector;
979
980         /* Restore GS value expected by per_cpu data accessors */
981         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
982
983         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
984         /*
985          * All guest state is marked unmodified; individual handlers must clear
986          * the bits as needed.
987          */
988         vmcb->clean_bits = 0xffffffff;
989
990         switch (vmcb->exitcode) {
991         case VMEXIT_INVALID:
992                 panic_printk("FATAL: VM-Entry failure, error %d\n",
993                              vmcb->exitcode);
994                 break;
995         case VMEXIT_NMI:
996                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
997                 /* Temporarily enable GIF to consume pending NMI */
998                 asm volatile("stgi; clgi" : : : "memory");
999                 sipi_vector = x86_handle_events(cpu_data);
1000                 if (sipi_vector >= 0) {
1001                         printk("CPU %d received SIPI, vector %x\n",
1002                                cpu_data->cpu_id, sipi_vector);
1003                         svm_vcpu_reset(cpu_data, sipi_vector);
1004                         memset(guest_regs, 0, sizeof(*guest_regs));
1005                 }
1006                 iommu_check_pending_faults(cpu_data);
1007                 return;
1008         case VMEXIT_CPUID:
1009                 /* FIXME: We are not intercepting CPUID now */
1010                 return;
1011         case VMEXIT_VMMCALL:
1012                 vcpu_vendor_get_execution_state(&x_state);
1013                 vcpu_handle_hypercall(guest_regs, &x_state);
1014                 return;
1015         case VMEXIT_CR0_SEL_WRITE:
1016                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
1017                 if (svm_handle_cr(guest_regs, cpu_data))
1018                         return;
1019                 break;
1020         case VMEXIT_MSR:
1021                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
1022                 if (!vmcb->exitinfo1)
1023                         res = svm_handle_msr_read(guest_regs, cpu_data);
1024                 else
1025                         res = svm_handle_msr_write(guest_regs, cpu_data);
1026                 if (res)
1027                         return;
1028                 break;
1029         case VMEXIT_NPF:
1030                 if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
1031                      vmcb->exitinfo2 >= XAPIC_BASE &&
1032                      vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
1033                         /* APIC access in non-AVIC mode */
1034                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
1035                         if (svm_handle_apic_access(guest_regs, cpu_data))
1036                                 return;
1037                 } else {
1038                         /* General MMIO (IOAPIC, PCI etc) */
1039                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
1040                         svm_get_vcpu_pf_intercept(cpu_data, &pf);
1041                         if (vcpu_handle_pt_violation(guest_regs, &pf))
1042                                 return;
1043                 }
1044
1045                 panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
1046                              "error code is %x\n", vmcb->exitinfo2,
1047                              vmcb->exitinfo1 & 0xf);
1048                 break;
1049         case VMEXIT_XSETBV:
1050                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
1051                 if ((guest_regs->rax & X86_XCR0_FP) &&
1052                     (guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
1053                     guest_regs->rcx == 0 && guest_regs->rdx == 0) {
1054                         vcpu_skip_emulated_instruction(X86_INST_LEN_XSETBV);
1055                         asm volatile(
1056                                 "xsetbv"
1057                                 : /* no output */
1058                                 : "a" (guest_regs->rax), "c" (0), "d" (0));
1059                         return;
1060                 }
1061                 panic_printk("FATAL: Invalid xsetbv parameters: "
1062                              "xcr[%d] = %x:%x\n", guest_regs->rcx,
1063                              guest_regs->rdx, guest_regs->rax);
1064                 break;
1065         case VMEXIT_IOIO:
1066                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
1067                 svm_get_vcpu_io_intercept(cpu_data, &io);
1068                 if (vcpu_handle_io_access(guest_regs, &io))
1069                         return;
1070                 break;
1071         /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
1072         default:
1073                 panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
1074                              "exitinfo1 %p exitinfo2 %p\n",
1075                              vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
1076         }
1077         dump_guest_regs(guest_regs, vmcb);
1078         panic_park();
1079 }
1080
1081 void vcpu_park(struct per_cpu *cpu_data)
1082 {
1083         struct vmcb *vmcb = &cpu_data->vmcb;
1084
1085         svm_vcpu_reset(cpu_data, APIC_BSP_PSEUDO_SIPI);
1086         /* No need to clear VMCB Clean bits: svm_vcpu_reset() already does this */
1087         vmcb->n_cr3 = paging_hvirt2phys(parked_mode_npt);
1088
1089         vcpu_tlb_flush();
1090 }
1091
1092 void vcpu_nmi_handler(void)
1093 {
1094 }
1095
1096 void vcpu_tlb_flush(void)
1097 {
1098         struct per_cpu *cpu_data = this_cpu_data();
1099         struct vmcb *vmcb = &cpu_data->vmcb;
1100
1101         if (has_flush_by_asid)
1102                 vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
1103         else
1104                 vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
1105 }
1106
1107 const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
1108                               unsigned long pc, unsigned int *size)
1109 {
1110         struct per_cpu *cpu_data = this_cpu_data();
1111         struct vmcb *vmcb = &cpu_data->vmcb;
1112         unsigned long start;
1113
1114         if (has_assists) {
1115                 if (!*size)
1116                         return NULL;
1117                 start = vmcb->rip - pc;
1118                 if (start < vmcb->bytes_fetched) {
1119                         *size = vmcb->bytes_fetched - start;
1120                         return &vmcb->guest_bytes[start];
1121                 } else {
1122                         return NULL;
1123                 }
1124         } else {
1125                 return vcpu_map_inst(pg_structs, pc, size);
1126         }
1127 }
1128
1129 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
1130                                     struct vcpu_io_bitmap *iobm)
1131 {
1132         iobm->data = cell->svm.iopm;
1133         iobm->size = sizeof(cell->svm.iopm);
1134 }
1135
1136 void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
1137 {
1138         struct per_cpu *cpu_data = this_cpu_data();
1139
1140         x_state->efer = cpu_data->vmcb.efer;
1141         x_state->rflags = cpu_data->vmcb.rflags;
1142         x_state->cs = cpu_data->vmcb.cs.selector;
1143         x_state->rip = cpu_data->vmcb.rip;
1144 }
1145
1146 /* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
1147 void enable_irq(void)
1148 {
1149         asm volatile("stgi; sti" : : : "memory");
1150 }
1151
1152 /* Jailhouse runs with GIF cleared, so we need to restore this state */
1153 void disable_irq(void)
1154 {
1155         asm volatile("cli; clgi" : : : "memory");
1156 }