/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/apic.h>
#include <asm/cell.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * The NW bit is ignored by all modern processors; however, some
 * combinations of NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_CLEARED_BITS    ~X86_CR0_NW

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];

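/*
 * MSR permission map: each MSR is covered by two consecutive bits, the
 * lower one intercepting reads, the upper one intercepting writes (see
 * APMv2, Sect. 15.11). One byte thus covers four MSRs, which is why all
 * indices below are divided by 4.
 */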
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
        [ SVM_MSRPM_0000 ] = {
                [      0/4 ...  0x017/4 ] = 0,
                [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
                [  0x01c/4 ...  0x7ff/4 ] = 0,
                /* x2APIC MSRs - emulated if not present */
                [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
                [  0x804/4 ...  0x807/4 ] = 0,
                [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
                [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
                [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
                [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
                [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
                [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
                [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
                [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
                [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
                [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
                [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
                [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
                [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
                [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
                [  0x840/4 ... 0x1fff/4 ] = 0,
        },
        [ SVM_MSRPM_C000 ] = {
                [      0/4 ...  0x07f/4 ] = 0,
                [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
                [  0x084/4 ... 0x1fff/4 ] = 0
        },
        [ SVM_MSRPM_C001 ] = {
                [      0/4 ... 0x1fff/4 ] = 0,
        },
        [ SVM_MSRPM_RESV ] = {
                [      0/4 ... 0x1fff/4 ] = 0,
        }
};

/* This page is mapped so the code begins at 0x000ffff0 */
static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
        [0xff0] = 0xfa, /* 1: cli */
        [0xff1] = 0xf4, /*    hlt */
        [0xff2] = 0xeb,
        [0xff3] = 0xfc  /*    jmp 1b */
};

static void *parked_mode_npt;

static void *avic_page;

static int svm_check_features(void)
{
        /* SVM is available */
        if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
                return -ENODEV;

        /* Nested paging */
        if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
                return -EIO;

        /* Decode assists */
        if (cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS)
                has_assists = true;

        /* AVIC support */
        if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
                has_avic = true;

        /* TLB Flush by ASID support */
        if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
                has_flush_by_asid = true;

        return 0;
}

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
                                     const struct desc_table_reg *dtr)
{
        struct svm_segment tmp = { 0 };

        if (dtr) {
                tmp.base = dtr->base;
                tmp.limit = dtr->limit & 0xffff;
        }

        *svm_segment = tmp;
}

/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
                                         const struct segment *segment)
{
        u32 ar;

        svm_segment->selector = segment->selector;

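        /*
         * 0x10000 is the VMX "unusable segment" flag; SVM marks an unusable
         * segment simply by a zeroed attribute field. Otherwise, fold the
         * VMX-style access rights (bits 0-7 plus bits 12-15) into the
         * 12-bit SVM attribute format.
         */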
        if (segment->access_rights == 0x10000) {
                svm_segment->access_rights = 0;
        } else {
                ar = segment->access_rights;
                svm_segment->access_rights =
                        ((ar & 0xf000) >> 4) | (ar & 0x00ff);
        }

        svm_segment->limit = segment->limit;
        svm_segment->base = segment->base;
}

static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
        /* No real need for this function; used for consistency with vmx.c */
        vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
        vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

        return true;
}

static int vmcb_setup(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;

        memset(vmcb, 0, sizeof(struct vmcb));

        vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
        vmcb->cr3 = cpu_data->linux_cr3;
        vmcb->cr4 = read_cr4();

        set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
        set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
        set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
        set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
        set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
        set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
        set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

        set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
        set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
        set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

        vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

        vmcb->rflags = 0x02;
        /* Indicate success to the caller of arch_entry */
        vmcb->rax = 0;
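        /*
         * arch_entry pushed the return address and NUM_ENTRY_REGS
         * callee-saved registers onto the Linux stack; unwind them so the
         * guest resumes with the stack pointer it had before the call.
         */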
        vmcb->rsp = cpu_data->linux_sp +
                (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
        vmcb->rip = cpu_data->linux_ip;

        vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
        vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
        vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
        vmcb->star = read_msr(MSR_STAR);
        vmcb->lstar = read_msr(MSR_LSTAR);
        vmcb->cstar = read_msr(MSR_CSTAR);
        vmcb->sfmask = read_msr(MSR_SFMASK);
        vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

        vmcb->dr6 = 0x00000ff0;
        vmcb->dr7 = 0x00000400;

        /* Make the hypervisor visible */
        vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

        /* Linux uses custom PAT setting */
        vmcb->g_pat = read_msr(MSR_IA32_PAT);

        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
        /* TODO: Do we need this for SVM? */
        /* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

        vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

        vmcb->np_enable = 1;
        /* No more than one guest owns the CPU */
        vmcb->guest_asid = 1;

        /* TODO: Setup AVIC */

        return vcpu_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
                                     unsigned long gphys,
                                     unsigned long flags)
{
        return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
                        gphys, flags);
}

static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
        /* See APMv2, Section 15.25.5 */
        *pte = (next_pt & 0x000ffffffffff000UL) |
                (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}

int vcpu_vendor_init(void)
{
        struct paging_structures parking_pt;
        unsigned long vm_cr;
        int err, n;

        err = svm_check_features();
        if (err)
                return err;

        vm_cr = read_msr(MSR_VM_CR);
        if (vm_cr & VM_CR_SVMDIS)
                /* SVM disabled in BIOS */
                return -EPERM;

        /* Nested paging is the same as the native one */
        memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
        for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
                npt_paging[n].set_next_pt = npt_set_next_pt;

        /* Map guest parking code (shared between cells and CPUs) */
        parking_pt.root_paging = npt_paging;
        parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
        if (!parked_mode_npt)
                return -ENOMEM;
        err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
                            PAGE_SIZE, 0x000ff000,
                            PAGE_READONLY_FLAGS | PAGE_FLAG_US,
                            PAGING_NON_COHERENT);
        if (err)
                return err;

        /*
         * This is always false for AMD now (except in nested SVM);
         * see Sect. 16.3.1 in APMv2.
         */
        if (using_x2apic) {
                /* allow direct x2APIC access except for ICR writes */
                memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
                                (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
                msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
        } else {
                /* Enable Extended Interrupt LVT */
                apic_reserved_bits[0x50] = 0;
                if (has_avic) {
                        avic_page = page_alloc(&remap_pool, 1);
                        if (!avic_page)
                                return -ENOMEM;
                }
        }

        return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
        u64 flags;
        int err;

        /* allocate iopm (two 4-K pages + 3 bits, hence 3 pages) */
        cell->svm.iopm = page_alloc(&mem_pool, 3);
        if (!cell->svm.iopm)
                return -ENOMEM;

        /* build root NPT of cell */
        cell->svm.npt_structs.root_paging = npt_paging;
        cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
        if (!cell->svm.npt_structs.root_table)
                return -ENOMEM;

        if (!has_avic) {
                /*
                 * Map xAPIC as is; reads are passed, writes are trapped.
                 */
                flags = PAGE_READONLY_FLAGS |
                        PAGE_FLAG_US |
                        PAGE_FLAG_UNCACHED;
                err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
                                    PAGE_SIZE, XAPIC_BASE,
                                    flags,
                                    PAGING_NON_COHERENT);
        } else {
                flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
                err = paging_create(&cell->svm.npt_structs,
                                    paging_hvirt2phys(avic_page),
                                    PAGE_SIZE, XAPIC_BASE,
                                    flags,
                                    PAGING_NON_COHERENT);
        }

        return err;
}

int vcpu_map_memory_region(struct cell *cell,
                           const struct jailhouse_memory *mem)
{
        u64 phys_start = mem->phys_start;
        u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

        if (mem->flags & JAILHOUSE_MEM_READ)
                flags |= PAGE_FLAG_PRESENT;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= PAGE_FLAG_RW;
        if (mem->flags & JAILHOUSE_MEM_EXECUTE)
                flags |= PAGE_FLAG_EXECUTE;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);

        return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
                             mem->virt_start, flags, PAGING_NON_COHERENT);
}

int vcpu_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
{
        return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
                              mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
        paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
                       PAGING_NON_COHERENT);
        page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

int vcpu_init(struct per_cpu *cpu_data)
{
        unsigned long efer;
        int err;

        err = svm_check_features();
        if (err)
                return err;

        efer = read_msr(MSR_EFER);
        if (efer & EFER_SVME)
                return -EBUSY;

        efer |= EFER_SVME;
        write_msr(MSR_EFER, efer);

        cpu_data->svm_state = SVMON;

        if (!vmcb_setup(cpu_data))
                return -EIO;

        write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

        /* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
        if (!using_x2apic)
                apic_reserved_bits[0x50] = 0;

        return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
        unsigned long efer;

        if (cpu_data->svm_state == SVMOFF)
                return;

        cpu_data->svm_state = SVMOFF;

        /* We are leaving - set the GIF */
        asm volatile ("stgi" : : : "memory");

        efer = read_msr(MSR_EFER);
        efer &= ~EFER_SVME;
        write_msr(MSR_EFER, efer);

        write_msr(MSR_VM_HSAVE_PA, 0);
}

void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
        unsigned long vmcb_pa, host_stack;

        vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
        host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);

        /* Clear host-mode MSRs */
        write_msr(MSR_IA32_SYSENTER_CS, 0);
        write_msr(MSR_IA32_SYSENTER_EIP, 0);
        write_msr(MSR_IA32_SYSENTER_ESP, 0);

        write_msr(MSR_STAR, 0);
        write_msr(MSR_LSTAR, 0);
        write_msr(MSR_CSTAR, 0);
        write_msr(MSR_SFMASK, 0);
        write_msr(MSR_KERNGS_BASE, 0);

        /*
         * XXX: We don't set our own PAT here but rather rely on Linux PAT
         * settings (and MTRRs). Potentially, a malicious Linux root cell can
         * set values different from what we expect, and interfere with APIC
         * virtualization in non-AVIC mode.
         */

        /* We enter Linux at the point arch_entry would return to as well.
         * rax is cleared to signal success to the caller. */
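        /*
         * The six moves below restore Linux's callee-saved registers from
         * linux_reg. With rax pointing at the VMCB, vmload pulls in the
         * remaining guest state (FS/GS/TR/LDTR, KernelGSbase and the
         * SYSCALL/SYSENTER MSRs), vmrun enters the guest, and vmsave writes
         * that state back after #VMEXIT, before we switch to the hypervisor
         * stack and jump to svm_vmexit.
         */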
        asm volatile(
                "clgi\n\t"
                "mov (%%rdi),%%r15\n\t"
                "mov 0x8(%%rdi),%%r14\n\t"
                "mov 0x10(%%rdi),%%r13\n\t"
                "mov 0x18(%%rdi),%%r12\n\t"
                "mov 0x20(%%rdi),%%rbx\n\t"
                "mov 0x28(%%rdi),%%rbp\n\t"
                "mov %0, %%rax\n\t"
                "vmload\n\t"
                "vmrun\n\t"
                "vmsave\n\t"
                /* Restore hypervisor stack */
                "mov %2, %%rsp\n\t"
                "jmp svm_vmexit"
                : /* no output */
                : "m" (vmcb_pa), "D" (cpu_data->linux_reg), "m" (host_stack)
                : "memory", "r15", "r14", "r13", "r12",
                  "rbx", "rbp", "rax", "cc");
        __builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long *stack = (unsigned long *)vmcb->rsp;
        unsigned long linux_ip = vmcb->rip;

        /* We are leaving - set the GIF */
        asm volatile ("stgi" : : : "memory");

        /*
         * Restore the MSRs.
         *
         * XXX: One could argue this would better be done in
         * arch_cpu_restore(); however, it would require changes
         * to cpu_data to store STAR and friends.
         */
        write_msr(MSR_STAR, vmcb->star);
        write_msr(MSR_LSTAR, vmcb->lstar);
        write_msr(MSR_CSTAR, vmcb->cstar);
        write_msr(MSR_SFMASK, vmcb->sfmask);
        write_msr(MSR_KERNGS_BASE, vmcb->kerngsbase);

        cpu_data->linux_cr3 = vmcb->cr3;

        cpu_data->linux_gdtr.base = vmcb->gdtr.base;
        cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
        cpu_data->linux_idtr.base = vmcb->idtr.base;
        cpu_data->linux_idtr.limit = vmcb->idtr.limit;

        cpu_data->linux_cs.selector = vmcb->cs.selector;

        cpu_data->linux_tss.selector = vmcb->tr.selector;

        cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
        cpu_data->linux_fs.base = vmcb->fs.base;
        cpu_data->linux_gs.base = vmcb->gs.base;

        cpu_data->linux_sysenter_cs = vmcb->sysenter_cs;
        cpu_data->linux_sysenter_eip = vmcb->sysenter_eip;
        cpu_data->linux_sysenter_esp = vmcb->sysenter_esp;

        cpu_data->linux_ds.selector = vmcb->ds.selector;
        cpu_data->linux_es.selector = vmcb->es.selector;
        cpu_data->linux_fs.selector = vmcb->fs.selector;
        cpu_data->linux_gs.selector = vmcb->gs.selector;

        arch_cpu_restore(cpu_data);

        stack--;
        *stack = linux_ip;

        asm volatile (
                "mov %%rbx,%%rsp\n\t"
                "pop %%r15\n\t"
                "pop %%r14\n\t"
                "pop %%r13\n\t"
                "pop %%r12\n\t"
                "pop %%r11\n\t"
                "pop %%r10\n\t"
                "pop %%r9\n\t"
                "pop %%r8\n\t"
                "pop %%rdi\n\t"
                "pop %%rsi\n\t"
                "pop %%rbp\n\t"
                "add $8,%%rsp\n\t"
                "pop %%rbx\n\t"
                "pop %%rdx\n\t"
                "pop %%rcx\n\t"
                "mov %%rax,%%rsp\n\t"
                "xor %%rax,%%rax\n\t"
                "ret"
                : : "a" (stack), "b" (guest_regs));
        __builtin_unreachable();
}

static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long val;
        bool ok = true;

        vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
        vmcb->cr3 = 0;
        vmcb->cr4 = 0;

        vmcb->rflags = 0x02;

        val = 0;
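        /*
         * A pseudo-SIPI resets the CPU to the real-mode reset vector
         * f000:fff0; for a parked CPU this address falls into the parking
         * page with its cli/hlt loop.
         */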
        if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
                val = 0xfff0;
                sipi_vector = 0xf0;
        }
        vmcb->rip = val;
        vmcb->rsp = 0;

        vmcb->cs.selector = sipi_vector << 8;
        vmcb->cs.base = sipi_vector << 12;
        vmcb->cs.limit = 0xffff;
        vmcb->cs.access_rights = 0x009b;

        vmcb->ds.selector = 0;
        vmcb->ds.base = 0;
        vmcb->ds.limit = 0xffff;
        vmcb->ds.access_rights = 0x0093;

        vmcb->es.selector = 0;
        vmcb->es.base = 0;
        vmcb->es.limit = 0xffff;
        vmcb->es.access_rights = 0x0093;

        vmcb->fs.selector = 0;
        vmcb->fs.base = 0;
        vmcb->fs.limit = 0xffff;
        vmcb->fs.access_rights = 0x0093;

        vmcb->gs.selector = 0;
        vmcb->gs.base = 0;
        vmcb->gs.limit = 0xffff;
        vmcb->gs.access_rights = 0x0093;

        vmcb->ss.selector = 0;
        vmcb->ss.base = 0;
        vmcb->ss.limit = 0xffff;
        vmcb->ss.access_rights = 0x0093;

        vmcb->tr.selector = 0;
        vmcb->tr.base = 0;
        vmcb->tr.limit = 0xffff;
        vmcb->tr.access_rights = 0x008b;

        vmcb->ldtr.selector = 0;
        vmcb->ldtr.base = 0;
        vmcb->ldtr.limit = 0xffff;
        vmcb->ldtr.access_rights = 0x0082;

        vmcb->gdtr.selector = 0;
        vmcb->gdtr.base = 0;
        vmcb->gdtr.limit = 0xffff;
        vmcb->gdtr.access_rights = 0;

        vmcb->idtr.selector = 0;
        vmcb->idtr.base = 0;
        vmcb->idtr.limit = 0xffff;
        vmcb->idtr.access_rights = 0;

        vmcb->efer = EFER_SVME;

        /* These MSRs are undefined on reset */
        vmcb->star = 0;
        vmcb->lstar = 0;
        vmcb->cstar = 0;
        vmcb->sfmask = 0;
        vmcb->sysenter_cs = 0;
        vmcb->sysenter_eip = 0;
        vmcb->sysenter_esp = 0;
        vmcb->kerngsbase = 0;

        vmcb->g_pat = 0x0007040600070406;

        vmcb->dr7 = 0x00000400;

        ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);

        /* This is always false, but to be consistent with vmx.c... */
        if (!ok) {
                panic_printk("FATAL: CPU reset failed\n");
                panic_stop();
        }
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;

        vmcb->rip += inst_len;
}

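/*
 * Once the guest runs with LME set and enables paging, hardware would set
 * EFER.LMA on its own. Since the CR0 write that enables paging is
 * intercepted and emulated in svm_handle_cr(), LMA is updated here manually.
 */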
static void update_efer(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long efer = vmcb->efer;

        if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
                return;

        efer |= EFER_LMA;

        /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
        if ((vmcb->efer ^ efer) & EFER_LMA)
                vcpu_tlb_flush();

        vmcb->efer = efer;
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;

        if (vmcb->efer & EFER_LMA) {
                pg_structs->root_paging = x86_64_paging;
                pg_structs->root_table_gphys =
                        vmcb->cr3 & 0x000ffffffffff000UL;
        } else if ((vmcb->cr0 & X86_CR0_PG) &&
                   !(vmcb->cr4 & X86_CR4_PAE)) {
                pg_structs->root_paging = i386_paging;
                pg_structs->root_table_gphys =
                        vmcb->cr3 & 0xfffff000UL;
        } else if (!(vmcb->cr0 & X86_CR0_PG)) {
                /*
                 * Can be in non-paged protected mode as well, but
                 * the translation mechanism will stay the same anyway.
                 */
                pg_structs->root_paging = realmode_paging;
                /*
                 * This will make paging_get_guest_pages map the page
                 * that also contains the bootstrap code and, thus, is
                 * always present in a cell.
                 */
                pg_structs->root_table_gphys = 0xff000;
        } else {
                printk("FATAL: Unsupported paging mode\n");
                return false;
        }
        return true;
}

struct parse_context {
        unsigned int remaining;
        unsigned int size;
        unsigned long cs_base;
        const u8 *inst;
};

static bool ctx_advance(struct parse_context *ctx,
                        unsigned long *pc,
                        struct guest_paging_structures *pg_structs)
{
        if (!ctx->size) {
                ctx->size = ctx->remaining;
                ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
                                          &ctx->size);
                if (!ctx->inst)
                        return false;
                ctx->remaining -= ctx->size;
                *pc += ctx->size;
        }
        return true;
}

static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data,
                                unsigned long pc,
                                unsigned char reg,
                                unsigned long *gpr)
{
        struct guest_paging_structures pg_structs;
        struct vmcb *vmcb = &cpu_data->vmcb;
        struct parse_context ctx = {};
        /* No prefixes are supported yet */
        u8 opcodes[] = {0x0f, 0x22}, modrm;
        bool ok = false;
        int n;

        ctx.remaining = ARRAY_SIZE(opcodes);
        if (!vcpu_get_guest_paging_structs(&pg_structs))
                goto out;
        ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;

        if (!ctx_advance(&ctx, &pc, &pg_structs))
                goto out;

        for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++) {
                if (*(ctx.inst) != opcodes[n])
                        goto out;
                if (!ctx_advance(&ctx, &pc, &pg_structs))
                        goto out;
        }

        if (!ctx_advance(&ctx, &pc, &pg_structs))
                goto out;

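        /* MOV to CR is 0f 22 /r: ModRM.reg selects the CR, ModRM.rm the GPR */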
        modrm = *(ctx.inst);

        if (((modrm & 0x38) >> 3) != reg)
                goto out;

        if (gpr)
                *gpr = (modrm & 0x7);

        ok = true;
out:
        return ok;
}

/*
 * XXX: The only visible reason to have this function (vmx.c consistency
 * aside) is to prevent cells from setting invalid CD+NW combinations that
 * result in no more than VMEXIT_INVALID. Maybe we can get along without it
 * altogether?
 */
static bool svm_handle_cr(struct registers *guest_regs,
                          struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        /* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
        unsigned long reg = -1, val, bits;
        bool ok = true;

        if (has_assists) {
                if (!(vmcb->exitinfo1 & (1UL << 63))) {
                        panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
                        ok = false;
                        goto out;
                }
                reg = vmcb->exitinfo1 & 0x07;
        } else {
                if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, &reg)) {
                        panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
                        ok = false;
                        goto out;
                }
        }

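        /*
         * struct registers holds the GPRs in push order, r15 down to rax,
         * so slot [15 - reg] yields the encoded source register; rsp is
         * not part of that frame and is taken from the VMCB instead.
         */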
        if (reg == 4)
                val = vmcb->rsp;
        else
                val = ((unsigned long *)guest_regs)[15 - reg];

        vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
        /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
        bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
        if ((val ^ vmcb->cr0) & bits)
                vcpu_tlb_flush();
        /* TODO: better check for #GP reasons */
        vmcb->cr0 = val & SVM_CR0_CLEARED_BITS;
        if (val & X86_CR0_PG)
                update_efer(cpu_data);

out:
        return ok;
}

static bool svm_handle_msr_read(struct registers *guest_regs,
                struct per_cpu *cpu_data)
{
        if (guest_regs->rcx >= MSR_X2APIC_BASE &&
            guest_regs->rcx <= MSR_X2APIC_END) {
                vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
                x2apic_handle_read(guest_regs);
                return true;
        } else {
                panic_printk("FATAL: Unhandled MSR read: %x\n",
                             guest_regs->rcx);
                return false;
        }
}

static bool svm_handle_msr_write(struct registers *guest_regs,
                struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long efer;
        bool result = true;

        if (guest_regs->rcx >= MSR_X2APIC_BASE &&
            guest_regs->rcx <= MSR_X2APIC_END) {
                result = x2apic_handle_write(guest_regs, cpu_data);
                goto out;
        }
        if (guest_regs->rcx == MSR_EFER) {
                /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
                efer = (guest_regs->rax & 0xffffffff) |
                        (guest_regs->rdx << 32) | EFER_SVME;
                /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
                if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
                        vcpu_tlb_flush();
                vmcb->efer = efer;
                goto out;
        }

        result = false;
        panic_printk("FATAL: Unhandled MSR write: %x\n",
                     guest_regs->rcx);
out:
        if (result)
                vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
        return result;
}

/*
 * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
 * be treated separately in svm_handle_avic_access().
 */
static bool svm_handle_apic_access(struct registers *guest_regs,
                                   struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        struct guest_paging_structures pg_structs;
        unsigned int inst_len, offset;
        bool is_write;

        /* The caller is responsible for sanity checks */
        is_write = !!(vmcb->exitinfo1 & 0x2);
        offset = vmcb->exitinfo2 - XAPIC_BASE;

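        /* xAPIC registers are aligned on 16-byte boundaries */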
        if (offset & 0x00f)
                goto out_err;

        if (!vcpu_get_guest_paging_structs(&pg_structs))
                goto out_err;

        inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
                                    &pg_structs, offset >> 4, is_write);
        if (!inst_len)
                goto out_err;

        vcpu_skip_emulated_instruction(inst_len);
        return true;

out_err:
        panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
                     offset, is_write);
        return false;
}

static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
{
        panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
                     vmcb->rsp, vmcb->rflags);
        panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
                     guest_regs->rbx, guest_regs->rcx);
        panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
                     guest_regs->rsi, guest_regs->rdi);
        panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
                     vmcb->cs.selector,
                     vmcb->cs.base,
                     vmcb->cs.access_rights,
                     (vmcb->efer & EFER_LMA));
        panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
                     vmcb->cr3, vmcb->cr4);
        panic_printk("EFER: %p\n", vmcb->efer);
}

static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
                                         struct vcpu_pf_intercept *out)
{
        struct vmcb *vmcb = &cpu_data->vmcb;

        out->phys_addr = vmcb->exitinfo2;
        out->is_write = !!(vmcb->exitinfo1 & 0x2);
}

static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
                                         struct vcpu_io_intercept *out)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        u64 exitinfo = vmcb->exitinfo1;

        /* Parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
        out->port = (exitinfo >> 16) & 0xFFFF;
        out->size = (exitinfo >> 4) & 0x7;
        out->in = !!(exitinfo & 0x1);
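        /* EXITINFO2 holds the rIP of the instruction following the IN/OUT */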
        out->inst_len = vmcb->exitinfo2 - vmcb->rip;
        out->rep_or_str = !!(exitinfo & 0x0c);
}

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;
        struct vcpu_execution_state x_state;
        struct vcpu_pf_intercept pf;
        struct vcpu_io_intercept io;
        bool res = false;
        int sipi_vector;

        /* Restore GS value expected by per_cpu data accessors */
        write_msr(MSR_GS_BASE, (unsigned long)cpu_data);

        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

        switch (vmcb->exitcode) {
        case VMEXIT_INVALID:
                panic_printk("FATAL: VM-Entry failure, error %d\n",
                             vmcb->exitcode);
                break;
        case VMEXIT_NMI:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
                /* Temporarily enable GIF to consume pending NMI */
                asm volatile("stgi; clgi" : : : "memory");
                sipi_vector = x86_handle_events(cpu_data);
                if (sipi_vector >= 0) {
                        printk("CPU %d received SIPI, vector %x\n",
                               cpu_data->cpu_id, sipi_vector);
                        vcpu_reset(cpu_data, sipi_vector);
                        memset(guest_regs, 0, sizeof(*guest_regs));
                }
                iommu_check_pending_faults(cpu_data);
                return;
        case VMEXIT_CPUID:
                /* FIXME: We are not intercepting CPUID now */
                return;
        case VMEXIT_VMMCALL:
                vcpu_vendor_get_execution_state(&x_state);
                vcpu_handle_hypercall(guest_regs, &x_state);
                return;
        case VMEXIT_CR0_SEL_WRITE:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
                if (svm_handle_cr(guest_regs, cpu_data))
                        return;
                break;
        case VMEXIT_MSR:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
                if (!vmcb->exitinfo1)
                        res = svm_handle_msr_read(guest_regs, cpu_data);
                else
                        res = svm_handle_msr_write(guest_regs, cpu_data);
                if (res)
                        return;
                break;
        case VMEXIT_NPF:
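                /*
                 * EXITINFO1 carries a page-fault error code: bit 0 set means
                 * the NPT entry was present, bit 1 a write access, bit 2 a
                 * user-mode access. 0x7 therefore matches a write to the
                 * read-only xAPIC mapping of a non-AVIC cell.
                 */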
                if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
                     vmcb->exitinfo2 >= XAPIC_BASE &&
                     vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
                        /* APIC access in non-AVIC mode */
                        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
                        if (svm_handle_apic_access(guest_regs, cpu_data))
                                return;
                } else {
                        /* General MMIO (IOAPIC, PCI etc) */
                        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
                        vcpu_vendor_get_pf_intercept(cpu_data, &pf);
                        if (vcpu_handle_pt_violation(guest_regs, &pf))
                                return;
                }

                panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
                             "error code is %x\n", vmcb->exitinfo2,
                             vmcb->exitinfo1 & 0xf);
                break;
        case VMEXIT_XSETBV:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
                if ((guest_regs->rax & X86_XCR0_FP) &&
                    (guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
                    guest_regs->rcx == 0 && guest_regs->rdx == 0) {
                        vcpu_skip_emulated_instruction(X86_INST_LEN_XSETBV);
                        asm volatile(
                                "xsetbv"
                                : /* no output */
                                : "a" (guest_regs->rax), "c" (0), "d" (0));
                        return;
                }
                panic_printk("FATAL: Invalid xsetbv parameters: "
                             "xcr[%d] = %x:%x\n", guest_regs->rcx,
                             guest_regs->rdx, guest_regs->rax);
                break;
        case VMEXIT_IOIO:
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
                vcpu_vendor_get_io_intercept(cpu_data, &io);
                if (vcpu_handle_io_access(guest_regs, &io))
                        return;
                break;
        /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
        default:
                panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
                             "exitinfo1 %p exitinfo2 %p\n",
                             vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
        }
        dump_guest_regs(guest_regs, vmcb);
        panic_park();
}

void vcpu_park(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;

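        /*
         * Reset the CPU to the real-mode reset vector and switch it to the
         * parking NPT, whose only mapping is the cli/hlt loop page; the
         * parked CPU spins there until it is brought up again.
         */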
        vcpu_reset(cpu_data, APIC_BSP_PSEUDO_SIPI);
        vmcb->n_cr3 = paging_hvirt2phys(parked_mode_npt);

        vcpu_tlb_flush();
}

void vcpu_nmi_handler(struct per_cpu *cpu_data)
{
        printk("Consuming pending NMI on CPU %d\n", cpu_data->cpu_id);
}

void vcpu_tlb_flush(void)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;

        if (has_flush_by_asid)
                vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
        else
                vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
                              unsigned long pc, unsigned int *size)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;
        unsigned long start;
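
        /*
         * With decode assists, the hardware deposits the intercepted
         * instruction bytes in the VMCB, so they can be returned without
         * mapping guest memory.
         */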
        if (has_assists) {
                if (!*size)
                        return NULL;
                start = vmcb->rip - pc;
                if (start < vmcb->bytes_fetched) {
                        *size = vmcb->bytes_fetched - start;
                        return &vmcb->guest_bytes[start];
                } else {
                        return NULL;
                }
        } else {
                return vcpu_map_inst(pg_structs, pc, size);
        }
}

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
                                    struct vcpu_io_bitmap *iobm)
{
        iobm->data = cell->svm.iopm;
        iobm->size = sizeof(cell->svm.iopm);
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
        struct per_cpu *cpu_data = this_cpu_data();

        x_state->efer = cpu_data->vmcb.efer;
        x_state->rflags = cpu_data->vmcb.rflags;
        x_state->cs = cpu_data->vmcb.cs.selector;
        x_state->rip = cpu_data->vmcb.rip;
}