hypervisor/arch/x86/svm.c
1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  * Copyright (c) Valentine Sinitsyn, 2014
6  *
7  * Authors:
8  *  Jan Kiszka <jan.kiszka@siemens.com>
9  *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
10  *
11  * Based on vmx.c written by Jan Kiszka.
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  */
16
17 #include <jailhouse/entry.h>
18 #include <jailhouse/cell-config.h>
19 #include <jailhouse/control.h>
20 #include <jailhouse/paging.h>
21 #include <jailhouse/printk.h>
22 #include <jailhouse/processor.h>
23 #include <jailhouse/string.h>
24 #include <jailhouse/utils.h>
25 #include <asm/apic.h>
26 #include <asm/cell.h>
27 #include <asm/control.h>
28 #include <asm/iommu.h>
29 #include <asm/paging.h>
30 #include <asm/percpu.h>
31 #include <asm/processor.h>
32 #include <asm/svm.h>
33 #include <asm/vcpu.h>
34
35 /*
36  * The NW bit is ignored by all modern processors, but some
37  * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
38  * Sect. 15.5). To handle this, we always keep the NW bit off.
39  */
40 #define SVM_CR0_ALLOWED_BITS    (~X86_CR0_NW)
41
42 static bool has_avic, has_assists, has_flush_by_asid;
43
44 static const struct segment invalid_seg;
45
46 static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
47
48 /* bit cleared: direct access allowed */
49 /* TODO: convert to whitelist */
50 static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
51         [ SVM_MSRPM_0000 ] = {
52                 [      0/4 ...  0x017/4 ] = 0,
53                 [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
54                 [  0x01c/4 ...  0x1ff/4 ] = 0,
55                 [  0x200/4 ...  0x273/4 ] = 0xaa, /* 0x200 - 0x273 (w) */
56                 [  0x274/4 ...  0x277/4 ] = 0xea, /* 0x274 - 0x276 (w), 0x277 (rw) */
57                 [  0x278/4 ...  0x2fb/4 ] = 0,
58                 [  0x2fc/4 ...  0x2ff/4 ] = 0x80, /* 0x2ff (w) */
59                 [  0x300/4 ...  0x7ff/4 ] = 0,
60                 /* x2APIC MSRs - emulated if not present */
61                 [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (w) */
62                 [  0x804/4 ...  0x807/4 ] = 0,
63                 [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
64                 [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
65                 [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
66                 [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
67                 [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
68                 [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
69                 [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
70                 [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
71                 [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
72                 [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
73                 [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
74                 [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
75                 [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
76                 [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
77                 [  0x840/4 ... 0x1fff/4 ] = 0,
78         },
79         [ SVM_MSRPM_C000 ] = {
80                 [      0/4 ...  0x07f/4 ] = 0,
81                 [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
82                 [  0x084/4 ... 0x1fff/4 ] = 0
83         },
84         [ SVM_MSRPM_C001 ] = {
85                 [      0/4 ... 0x1fff/4 ] = 0,
86         },
87         [ SVM_MSRPM_RESV ] = {
88                 [      0/4 ... 0x1fff/4 ] = 0,
89         }
90 };
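
/*
 * Minimal sketch of how the per-byte values above decode (the helper below is
 * illustrative only and unused): each MSR occupies two bits in its 2K region,
 * the lower bit intercepting reads and the upper bit intercepting writes, so
 * one byte covers four consecutive MSRs.
 */
static inline bool msrpm_intercepts(const u8 *region, unsigned int msr_offs,
                                    bool write)
{
        /* e.g. region[0x277 / 4] = 0xea: bits 6 (r) and 7 (w) -> "0x277 (rw)" */
        return region[msr_offs / 4] & (1 << ((msr_offs % 4) * 2 + write));
}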
91
92 /* This page is mapped so the code begins at 0x000ffff0 */
93 static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
94         [0xff0] = 0xfa, /* 1: cli */
95         [0xff1] = 0xf4, /*    hlt */
96         [0xff2] = 0xeb,
97         [0xff3] = 0xfc  /*    jmp 1b */
98 };
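
/*
 * The parked loop above disassembles to:
 *
 *      1:      cli
 *              hlt
 *              jmp 1b
 *
 * A parked CPU is reset via the BSP pseudo-SIPI (see svm_vcpu_reset() and
 * vcpu_park() below), so it starts fetching at guest-physical 0xffff0; the
 * parking page tables map this page at 0x000ff000 to cover that address.
 */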
99
100 static void *parked_mode_npt;
101
102 static void *avic_page;
103
104 static int svm_check_features(void)
105 {
106         /* SVM is available */
107         if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
108                 return trace_error(-ENODEV);
109
110         /* Nested paging */
111         if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
112                 return trace_error(-EIO);
113
114         /* Decode assists */
115         if (cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS)
116                 has_assists = true;
117
118         /* AVIC support */
119         if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
120                 has_avic = true;
121
122         /* TLB Flush by ASID support */
123         if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
124                 has_flush_by_asid = true;
125
126         return 0;
127 }
128
129 static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
130                                      const struct desc_table_reg *dtr)
131 {
132         struct svm_segment tmp = { 0 };
133
134         if (dtr) {
135                 tmp.base = dtr->base;
136                 tmp.limit = dtr->limit & 0xffff;
137         }
138
139         *svm_segment = tmp;
140 }
141
142 /* TODO: struct segment needs to be x86-generic, not the VMX-specific one used here */
143 static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
144                                          const struct segment *segment)
145 {
146         u32 ar;
147
148         svm_segment->selector = segment->selector;
149
150         if (segment->access_rights == 0x10000) {
151                 svm_segment->access_rights = 0;
152         } else {
153                 ar = segment->access_rights;
154                 svm_segment->access_rights =
155                         ((ar & 0xf000) >> 4) | (ar & 0x00ff);
156         }
157
158         svm_segment->limit = segment->limit;
159         svm_segment->base = segment->base;
160 }
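
/*
 * The attribute conversion above follows from the two layouts: the VMX-style
 * value keeps type/S/DPL/P in bits 0-7 and AVL/L/D/G in bits 12-15 (bit 16
 * flags an unusable segment), while the VMCB packs the same fields into 12
 * contiguous bits. For example, 0xc093 becomes ((0xc000 >> 4) | 0x93) == 0xc93.
 */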
161
162 static bool svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
163 {
164         /* No real need for this function; used for consistency with vmx.c */
165         vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
166         vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);
167
168         return true;
169 }
170
171 static int vmcb_setup(struct per_cpu *cpu_data)
172 {
173         struct vmcb *vmcb = &cpu_data->vmcb;
174
175         memset(vmcb, 0, sizeof(struct vmcb));
176
177         vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
178         vmcb->cr3 = cpu_data->linux_cr3;
179         vmcb->cr4 = cpu_data->linux_cr4;
180
181         set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
182         set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
183         set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
184         set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
185         set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
186         set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
187         set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
188
189         set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
190         set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
191         set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
192
193         vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
194
195         vmcb->rflags = 0x02;
196         /* Indicate success to the caller of arch_entry */
197         vmcb->rax = 0;
198         vmcb->rsp = cpu_data->linux_sp +
199                 (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
200         vmcb->rip = cpu_data->linux_ip;
201
202         vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
203         vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
204         vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
205         vmcb->star = read_msr(MSR_STAR);
206         vmcb->lstar = read_msr(MSR_LSTAR);
207         vmcb->cstar = read_msr(MSR_CSTAR);
208         vmcb->sfmask = read_msr(MSR_SFMASK);
209         vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
210
211         vmcb->dr6 = 0x00000ff0;
212         vmcb->dr7 = 0x00000400;
213
214         /* Make the hypervisor visible */
215         vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
216
217         vmcb->g_pat = cpu_data->pat;
218
219         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
220         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
221         /* TODO: Do we need this for SVM? */
222         /* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
223         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
224         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
225         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
226
227         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
228         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
229
230         vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
231
232         vmcb->np_enable = 1;
233         /* Only one guest ever runs on this CPU, so a single ASID suffices */
234         vmcb->guest_asid = 1;
235
236         /* TODO: Setup AVIC */
237
238         /* Explicitly mark all of the state as new */
239         vmcb->clean_bits = 0;
240
241         return svm_set_cell_config(cpu_data->cell, vmcb);
242 }
243
244 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
245                                      unsigned long gphys,
246                                      unsigned long flags)
247 {
248         return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
249                         gphys, flags);
250 }
251
252 static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
253 {
254         /* See APMv2, Section 15.25.5 */
255         *pte = (next_pt & 0x000ffffffffff000UL) |
256                 (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
257 }
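
/*
 * PAGE_FLAG_US is required because guest accesses are treated as user-mode
 * accesses during the nested table walk; a non-leaf NPT entry without the
 * U/S bit set would make every guest access fault (hence the APMv2 reference
 * above).
 */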
258
259 int vcpu_vendor_init(void)
260 {
261         struct paging_structures parking_pt;
262         unsigned long vm_cr;
263         int err, n;
264
265         err = svm_check_features();
266         if (err)
267                 return err;
268
269         vm_cr = read_msr(MSR_VM_CR);
270         if (vm_cr & VM_CR_SVMDIS)
271                 /* SVM disabled in BIOS */
272                 return trace_error(-EPERM);
273
274         /* Nested paging uses the same table format as native x86-64 paging */
275         memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
276         for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
277                 npt_paging[n].set_next_pt = npt_set_next_pt;
278
279         /* Map guest parking code (shared between cells and CPUs) */
280         parking_pt.root_paging = npt_paging;
281         parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
282         if (!parked_mode_npt)
283                 return -ENOMEM;
284         err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
285                             PAGE_SIZE, 0x000ff000,
286                             PAGE_READONLY_FLAGS | PAGE_FLAG_US,
287                             PAGING_NON_COHERENT);
288         if (err)
289                 return err;
290
291         /* This is always false on AMD hardware for now (except under
292            nested SVM); see Sect. 16.3.1 in APMv2 */
293         if (using_x2apic) {
294                 /* allow direct x2APIC access except for ICR writes */
295                 memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
296                                 (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
297                 msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
298         } else {
299                 if (has_avic) {
300                         avic_page = page_alloc(&remap_pool, 1);
301                         if (!avic_page)
302                                 return trace_error(-ENOMEM);
303                 }
304         }
305
306         return vcpu_cell_init(&root_cell);
307 }
308
309 int vcpu_vendor_cell_init(struct cell *cell)
310 {
311         u64 flags;
312         int err;
313
314         /* allocate iopm (two 4-K pages + 3 bits) */
315         cell->svm.iopm = page_alloc(&mem_pool, 3);
316         if (!cell->svm.iopm)
317                 return -ENOMEM;
318
319         /* build root NPT of cell */
320         cell->svm.npt_structs.root_paging = npt_paging;
321         cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
322         if (!cell->svm.npt_structs.root_table)
323                 return -ENOMEM;
324
325         if (!has_avic) {
326                 /*
327                  * Map xAPIC as is; reads are passed, writes are trapped.
328                  */
329                 flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
330                 err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
331                                     PAGE_SIZE, XAPIC_BASE,
332                                     flags,
333                                     PAGING_NON_COHERENT);
334         } else {
335                 flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
336                 err = paging_create(&cell->svm.npt_structs,
337                                     paging_hvirt2phys(avic_page),
338                                     PAGE_SIZE, XAPIC_BASE,
339                                     flags,
340                                     PAGING_NON_COHERENT);
341         }
342
343         return err;
344 }
345
346 int vcpu_map_memory_region(struct cell *cell,
347                            const struct jailhouse_memory *mem)
348 {
349         u64 phys_start = mem->phys_start;
350         u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
351
352         if (mem->flags & JAILHOUSE_MEM_READ)
353                 flags |= PAGE_FLAG_PRESENT;
354         if (mem->flags & JAILHOUSE_MEM_WRITE)
355                 flags |= PAGE_FLAG_RW;
356         if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
357                 flags |= PAGE_FLAG_NOEXECUTE;
358         if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
359                 phys_start = paging_hvirt2phys(&cell->comm_page);
360
361         return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
362                              mem->virt_start, flags, PAGING_NON_COHERENT);
363 }
364
365 int vcpu_unmap_memory_region(struct cell *cell,
366                              const struct jailhouse_memory *mem)
367 {
368         return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
369                               mem->size, PAGING_NON_COHERENT);
370 }
371
372 void vcpu_vendor_cell_exit(struct cell *cell)
373 {
374         paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
375                        PAGING_NON_COHERENT);
376         page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
377 }
378
379 int vcpu_init(struct per_cpu *cpu_data)
380 {
381         unsigned long efer;
382         int err;
383
384         err = svm_check_features();
385         if (err)
386                 return err;
387
388         efer = read_msr(MSR_EFER);
389         if (efer & EFER_SVME)
390                 return trace_error(-EBUSY);
391
392         efer |= EFER_SVME;
393         write_msr(MSR_EFER, efer);
394
395         cpu_data->svm_state = SVMON;
396
397         if (!vmcb_setup(cpu_data))
398                 return trace_error(-EIO);
399
400         /*
401          * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
402          * set the values of reserved bits to the values found during the
403          * previous CR0 read."
404          * But we want to avoid surprises with new features unknown to us but
405          * set by Linux. So check if any assumed reserved bit was set and bail
406          * out if so.
407          * Note that the APM defines all reserved CR4 bits as must-be-zero.
408          */
409         if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
410                 return -EIO;
411
412         /* bring CR0 and CR4 into well-defined states */
413         write_cr0(X86_CR0_HOST_STATE);
414         write_cr4(X86_CR4_HOST_STATE);
415
416         write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
417
418         return 0;
419 }
420
421 void vcpu_exit(struct per_cpu *cpu_data)
422 {
423         unsigned long efer;
424
425         if (cpu_data->svm_state == SVMOFF)
426                 return;
427
428         cpu_data->svm_state = SVMOFF;
429
430         /* We are leaving - set the GIF */
431         asm volatile ("stgi" : : : "memory");
432
433         efer = read_msr(MSR_EFER);
434         efer &= ~EFER_SVME;
435         write_msr(MSR_EFER, efer);
436
437         write_msr(MSR_VM_HSAVE_PA, 0);
438 }
439
440 void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
441 {
442         unsigned long vmcb_pa, host_stack;
443
444         vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
445         host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);
446
447         /* We enter Linux at the point arch_entry would return to as well.
448          * rax is cleared to signal success to the caller. */
449         asm volatile(
450                 "clgi\n\t"
451                 "mov (%%rdi),%%r15\n\t"
452                 "mov 0x8(%%rdi),%%r14\n\t"
453                 "mov 0x10(%%rdi),%%r13\n\t"
454                 "mov 0x18(%%rdi),%%r12\n\t"
455                 "mov 0x20(%%rdi),%%rbx\n\t"
456                 "mov 0x28(%%rdi),%%rbp\n\t"
457                 "mov %0, %%rax\n\t"
458                 "vmload %%rax\n\t"
459                 "vmrun %%rax\n\t"
460                 "vmsave %%rax\n\t"
461                 /* Restore hypervisor stack */
462                 "mov %2, %%rsp\n\t"
463                 "jmp svm_vmexit"
464                 : /* no output */
465                 : "m" (vmcb_pa), "D" (cpu_data->linux_reg), "m" (host_stack)
466                 : "memory", "r15", "r14", "r13", "r12",
467                   "rbx", "rbp", "rax", "cc");
468         __builtin_unreachable();
469 }
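
/*
 * The sequence above, in short: CLGI keeps interrupts at bay while the
 * callee-saved registers preserved in cpu_data->linux_reg are reloaded,
 * VMLOAD pulls the additional guest state (FS/GS/TR/LDTR hidden state,
 * KernelGSbase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs) from the
 * VMCB, VMRUN saves the host state to the VM_HSAVE_PA area and runs the
 * guest until #VMEXIT, and VMSAVE writes that additional state back to the
 * VMCB before we switch to the hypervisor stack and branch to svm_vmexit.
 */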
470
471 void __attribute__((noreturn))
472 vcpu_deactivate_vmm(struct registers *guest_regs)
473 {
474         struct per_cpu *cpu_data = this_cpu_data();
475         struct vmcb *vmcb = &cpu_data->vmcb;
476         unsigned long *stack = (unsigned long *)vmcb->rsp;
477         unsigned long linux_ip = vmcb->rip;
478
479         /*
480          * Restore the MSRs.
481          *
482          * XXX: One could argue this would better be done in
483          * arch_cpu_restore(), however, it would require changes
484          * to cpu_data to store STAR and friends.
485          */
486         write_msr(MSR_STAR, vmcb->star);
487         write_msr(MSR_LSTAR, vmcb->lstar);
488         write_msr(MSR_CSTAR, vmcb->cstar);
489         write_msr(MSR_SFMASK, vmcb->sfmask);
490         write_msr(MSR_KERNGS_BASE, vmcb->kerngsbase);
491
492         cpu_data->linux_cr0 = vmcb->cr0;
493         cpu_data->linux_cr3 = vmcb->cr3;
494
495         cpu_data->linux_gdtr.base = vmcb->gdtr.base;
496         cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
497         cpu_data->linux_idtr.base = vmcb->idtr.base;
498         cpu_data->linux_idtr.limit = vmcb->idtr.limit;
499
500         cpu_data->linux_cs.selector = vmcb->cs.selector;
501
502         cpu_data->linux_tss.selector = vmcb->tr.selector;
503
504         cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
505         cpu_data->linux_fs.base = vmcb->fs.base;
506         cpu_data->linux_gs.base = vmcb->gs.base;
507
508         cpu_data->linux_sysenter_cs = vmcb->sysenter_cs;
509         cpu_data->linux_sysenter_eip = vmcb->sysenter_eip;
510         cpu_data->linux_sysenter_esp = vmcb->sysenter_esp;
511
512         cpu_data->linux_ds.selector = vmcb->ds.selector;
513         cpu_data->linux_es.selector = vmcb->es.selector;
514         cpu_data->linux_fs.selector = vmcb->fs.selector;
515         cpu_data->linux_gs.selector = vmcb->gs.selector;
516
517         arch_cpu_restore(cpu_data, 0);
518
519         stack--;
520         *stack = linux_ip;
521
522         asm volatile (
523                 "mov %%rbx,%%rsp\n\t"
524                 "pop %%r15\n\t"
525                 "pop %%r14\n\t"
526                 "pop %%r13\n\t"
527                 "pop %%r12\n\t"
528                 "pop %%r11\n\t"
529                 "pop %%r10\n\t"
530                 "pop %%r9\n\t"
531                 "pop %%r8\n\t"
532                 "pop %%rdi\n\t"
533                 "pop %%rsi\n\t"
534                 "pop %%rbp\n\t"
535                 "add $8,%%rsp\n\t"
536                 "pop %%rbx\n\t"
537                 "pop %%rdx\n\t"
538                 "pop %%rcx\n\t"
539                 "mov %%rax,%%rsp\n\t"
540                 "xor %%rax,%%rax\n\t"
541                 "ret"
542                 : : "a" (stack), "b" (guest_regs));
543         __builtin_unreachable();
544 }
545
546 static void svm_vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
547 {
548         struct vmcb *vmcb = &cpu_data->vmcb;
549         unsigned long val;
550         bool ok = true;
551
552         vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
553         vmcb->cr3 = 0;
554         vmcb->cr4 = 0;
555
556         vmcb->rflags = 0x02;
557
558         val = 0;
559         if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
560                 val = 0xfff0;
561                 sipi_vector = 0xf0;
562         }
563         vmcb->rip = val;
564         vmcb->rsp = 0;
565
566         vmcb->cs.selector = sipi_vector << 8;
567         vmcb->cs.base = sipi_vector << 12;
568         vmcb->cs.limit = 0xffff;
569         vmcb->cs.access_rights = 0x009b;
570
571         vmcb->ds.selector = 0;
572         vmcb->ds.base = 0;
573         vmcb->ds.limit = 0xffff;
574         vmcb->ds.access_rights = 0x0093;
575
576         vmcb->es.selector = 0;
577         vmcb->es.base = 0;
578         vmcb->es.limit = 0xffff;
579         vmcb->es.access_rights = 0x0093;
580
581         vmcb->fs.selector = 0;
582         vmcb->fs.base = 0;
583         vmcb->fs.limit = 0xffff;
584         vmcb->fs.access_rights = 0x0093;
585
586         vmcb->gs.selector = 0;
587         vmcb->gs.base = 0;
588         vmcb->gs.limit = 0xffff;
589         vmcb->gs.access_rights = 0x0093;
590
591         vmcb->ss.selector = 0;
592         vmcb->ss.base = 0;
593         vmcb->ss.limit = 0xffff;
594         vmcb->ss.access_rights = 0x0093;
595
596         vmcb->tr.selector = 0;
597         vmcb->tr.base = 0;
598         vmcb->tr.limit = 0xffff;
599         vmcb->tr.access_rights = 0x008b;
600
601         vmcb->ldtr.selector = 0;
602         vmcb->ldtr.base = 0;
603         vmcb->ldtr.limit = 0xffff;
604         vmcb->ldtr.access_rights = 0x0082;
605
606         vmcb->gdtr.selector = 0;
607         vmcb->gdtr.base = 0;
608         vmcb->gdtr.limit = 0xffff;
609         vmcb->gdtr.access_rights = 0;
610
611         vmcb->idtr.selector = 0;
612         vmcb->idtr.base = 0;
613         vmcb->idtr.limit = 0xffff;
614         vmcb->idtr.access_rights = 0;
615
616         vmcb->efer = EFER_SVME;
617
618         /* These MSRs are undefined on reset */
619         vmcb->star = 0;
620         vmcb->lstar = 0;
621         vmcb->cstar = 0;
622         vmcb->sfmask = 0;
623         vmcb->sysenter_cs = 0;
624         vmcb->sysenter_eip = 0;
625         vmcb->sysenter_esp = 0;
626         vmcb->kerngsbase = 0;
627
628         vmcb->dr7 = 0x00000400;
629
630         /* Almost all of the guest state changed */
631         vmcb->clean_bits = 0;
632
633         ok &= svm_set_cell_config(cpu_data->cell, vmcb);
634
635         /* Never true (svm_set_cell_config() cannot fail), kept for consistency with vmx.c */
636         if (!ok) {
637                 panic_printk("FATAL: CPU reset failed\n");
638                 panic_stop();
639         }
640 }
641
642 void vcpu_skip_emulated_instruction(unsigned int inst_len)
643 {
644         struct per_cpu *cpu_data = this_cpu_data();
645         struct vmcb *vmcb = &cpu_data->vmcb;
646         vmcb->rip += inst_len;
647 }
648
649 static void update_efer(struct per_cpu *cpu_data)
650 {
651         struct vmcb *vmcb = &cpu_data->vmcb;
652         unsigned long efer = vmcb->efer;
653
654         if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
655                 return;
656
657         efer |= EFER_LMA;
658
659         /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
660         if ((vmcb->efer ^ efer) & EFER_LMA)
661                 vcpu_tlb_flush();
662
663         vmcb->efer = efer;
664         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
665 }
666
667 bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
668 {
669         struct per_cpu *cpu_data = this_cpu_data();
670         struct vmcb *vmcb = &cpu_data->vmcb;
671
672         if (vmcb->efer & EFER_LMA) {
673                 pg_structs->root_paging = x86_64_paging;
674                 pg_structs->root_table_gphys =
675                         vmcb->cr3 & 0x000ffffffffff000UL;
676         } else if ((vmcb->cr0 & X86_CR0_PG) &&
677                    !(vmcb->cr4 & X86_CR4_PAE)) {
678                 pg_structs->root_paging = i386_paging;
679                 pg_structs->root_table_gphys =
680                         vmcb->cr3 & 0xfffff000UL;
681         } else if (!(vmcb->cr0 & X86_CR0_PG)) {
682                 /*
683                  * Can be in non-paged protected mode as well, but
684          * the translation mechanism will stay the same anyway.
685                  */
686                 pg_structs->root_paging = realmode_paging;
687                 /*
688                  * This will make paging_get_guest_pages map the page
689                  * that also contains the bootstrap code and, thus, is
690                  * always present in a cell.
691                  */
692                 pg_structs->root_table_gphys = 0xff000;
693         } else {
694                 printk("FATAL: Unsupported paging mode\n");
695                 return false;
696         }
697         return true;
698 }
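
/*
 * Note that 32-bit PAE paging (CR0.PG = 1, CR4.PAE = 1, EFER.LMA = 0) is not
 * handled here and ends up in the "Unsupported paging mode" branch; only
 * 4-level long mode, classic 32-bit paging and unpaged/real mode are
 * recognized.
 */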
699
700 void vcpu_vendor_set_guest_pat(unsigned long val)
701 {
702         struct vmcb *vmcb = &this_cpu_data()->vmcb;
703
704         vmcb->g_pat = val;
705         vmcb->clean_bits &= ~CLEAN_BITS_NP;
706 }
707
708 struct parse_context {
709         unsigned int remaining;
710         unsigned int size;
711         unsigned long cs_base;
712         const u8 *inst;
713 };
714
715 static bool ctx_advance(struct parse_context *ctx,
716                         unsigned long *pc,
717                         struct guest_paging_structures *pg_structs)
718 {
719         if (!ctx->size) {
720                 ctx->size = ctx->remaining;
721                 ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
722                                           &ctx->size);
723                 if (!ctx->inst)
724                         return false;
725                 ctx->remaining -= ctx->size;
726                 *pc += ctx->size;
727         }
728         return true;
729 }
730
731 static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data,
732                                 unsigned long pc,
733                                 unsigned char reg,
734                                 unsigned long *gpr)
735 {
736         struct guest_paging_structures pg_structs;
737         struct vmcb *vmcb = &cpu_data->vmcb;
738         struct parse_context ctx = {};
739         /* No prefixes are supported yet */
740         u8 opcodes[] = {0x0f, 0x22}, modrm;
741         bool ok = false;
742         int n;
743
744         ctx.remaining = ARRAY_SIZE(opcodes);
745         if (!vcpu_get_guest_paging_structs(&pg_structs))
746                 goto out;
747         ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;
748
749         if (!ctx_advance(&ctx, &pc, &pg_structs))
750                 goto out;
751
752         for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++) {
753                 if (*(ctx.inst) != opcodes[n])
754                         goto out;
755                 if (!ctx_advance(&ctx, &pc, &pg_structs))
756                         goto out;
757         }
758
759         if (!ctx_advance(&ctx, &pc, &pg_structs))
760                 goto out;
761
762         modrm = *(ctx.inst);
763
764         if (((modrm & 0x38) >> 3) != reg)
765                 goto out;
766
767         if (gpr)
768                 *gpr = (modrm & 0x7);
769
770         ok = true;
771 out:
772         return ok;
773 }
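
/*
 * MOV to CRn is encoded as 0f 22 /r with mod = 11b: ModRM.reg selects the
 * control register and ModRM.rm the source GPR, e.g. the byte sequence
 * "0f 22 d8" is mov %rax,%cr3. As noted above, prefixes (and therefore
 * REX-extended registers such as r8-r15) are not decoded yet.
 */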
774
775 /*
776  * XXX: The only visible reason to have this function (vmx.c consistency
777  * aside) is to prevent cells from setting invalid CD+NW combinations that
778  * result in no more than VMEXIT_INVALID. Maybe we can get along without it
779  * altogether?
780  */
781 static bool svm_handle_cr(struct registers *guest_regs,
782                           struct per_cpu *cpu_data)
783 {
784         struct vmcb *vmcb = &cpu_data->vmcb;
785         /* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
786         unsigned long reg = -1, val, bits;
787         bool ok = true;
788
789         if (has_assists) {
790                 if (!(vmcb->exitinfo1 & (1UL << 63))) {
791                         panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
792                         ok = false;
793                         goto out;
794                 }
795                 reg = vmcb->exitinfo1 & 0x07;
796         } else {
797                 if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, &reg)) {
798                         panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
799                         ok = false;
800                         goto out;
801                 }
802         }
803
804         if (reg == 4)
805                 val = vmcb->rsp;
806         else
807                 val = ((unsigned long *)guest_regs)[15 - reg];
808
809         vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
810         /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
811         bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
812         if ((val ^ vmcb->cr0) & bits)
813                 vcpu_tlb_flush();
814         /* TODO: better check for #GP reasons */
815         vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
816         if (val & X86_CR0_PG)
817                 update_efer(cpu_data);
818         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
819
820 out:
821         return ok;
822 }
823
824 static bool svm_handle_msr_write(struct registers *guest_regs,
825                 struct per_cpu *cpu_data)
826 {
827         struct vmcb *vmcb = &cpu_data->vmcb;
828         unsigned long efer;
829
830         if (guest_regs->rcx == MSR_EFER) {
831                 /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
832                 efer = get_wrmsr_value(guest_regs) | EFER_SVME;
833                 /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
834                 if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
835                         vcpu_tlb_flush();
836                 vmcb->efer = efer;
837                 vmcb->clean_bits &= ~CLEAN_BITS_CRX;
838                 vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
839                 return true;
840         }
841
842         return vcpu_handle_msr_write(guest_regs);
843 }
844
845 /*
846  * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
847  * be treated separately in svm_handle_avic_access().
848  */
849 static bool svm_handle_apic_access(struct registers *guest_regs,
850                                    struct per_cpu *cpu_data)
851 {
852         struct vmcb *vmcb = &cpu_data->vmcb;
853         struct guest_paging_structures pg_structs;
854         unsigned int inst_len, offset;
855         bool is_write;
856
857         /* The caller is responsible for sanity checks */
858         is_write = !!(vmcb->exitinfo1 & 0x2);
859         offset = vmcb->exitinfo2 - XAPIC_BASE;
860
861         if (offset & 0x00f)
862                 goto out_err;
863
864         if (!vcpu_get_guest_paging_structs(&pg_structs))
865                 goto out_err;
866
867         inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
868                                     &pg_structs, offset >> 4, is_write);
869         if (!inst_len)
870                 goto out_err;
871
872         vcpu_skip_emulated_instruction(inst_len);
873         return true;
874
875 out_err:
876         panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
877                      offset, is_write);
878         return false;
879 }
880
881 static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
882 {
883         panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
884                      vmcb->rsp, vmcb->rflags);
885         panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
886                      guest_regs->rbx, guest_regs->rcx);
887         panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
888                      guest_regs->rsi, guest_regs->rdi);
889         panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
890                      vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
891                      !!(vmcb->efer & EFER_LMA));
892         panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
893                      vmcb->cr3, vmcb->cr4);
894         panic_printk("EFER: %p\n", vmcb->efer);
895 }
896
897 static void svm_get_vcpu_pf_intercept(struct per_cpu *cpu_data,
898                                       struct vcpu_pf_intercept *out)
899 {
900         struct vmcb *vmcb = &cpu_data->vmcb;
901
902         out->phys_addr = vmcb->exitinfo2;
903         out->is_write = !!(vmcb->exitinfo1 & 0x2);
904 }
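
/*
 * For #VMEXIT(NPF), exitinfo1 holds a #PF-style error code (bit 0: present,
 * bit 1: write, bit 2: user) and exitinfo2 the faulting guest-physical
 * address. This is also why the NPF handler below treats error code 0x7
 * (a guest write to a present, read-only mapped page) as a candidate xAPIC
 * access.
 */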
905
906 void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *io)
907 {
908         struct vmcb *vmcb = &this_cpu_data()->vmcb;
909         u64 exitinfo = vmcb->exitinfo1;
910
911         /* Parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
912         io->port = (exitinfo >> 16) & 0xFFFF;
913         io->size = (exitinfo >> 4) & 0x7;
914         io->in = !!(exitinfo & 0x1);
915         io->inst_len = vmcb->exitinfo2 - vmcb->rip;
916         io->rep_or_str = !!(exitinfo & 0x0c);
917 }
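
/*
 * EXITINFO1 layout for IOIO intercepts, as used above: bit 0 is the direction
 * (1 = IN), bit 2 marks string instructions, bit 3 a REP prefix, bits 6:4
 * give the operand size as a one-hot value in bytes, and bits 31:16 carry the
 * port number. EXITINFO2 holds the rIP of the instruction following IN/OUT,
 * which yields the instruction length.
 */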
918
919 void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
920 {
921         struct vmcb *vmcb = &cpu_data->vmcb;
922         struct vcpu_pf_intercept pf;
923         bool res = false;
924         int sipi_vector;
925
926         /* Restore GS value expected by per_cpu data accessors */
927         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
928
929         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
930         /*
931          * All guest state is marked unmodified; individual handlers must clear
932          * the bits as needed.
933          */
934         vmcb->clean_bits = 0xffffffff;
935
936         switch (vmcb->exitcode) {
937         case VMEXIT_INVALID:
938                 panic_printk("FATAL: VM-Entry failure, error %d\n",
939                              vmcb->exitcode);
940                 break;
941         case VMEXIT_NMI:
942                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
943                 /* Temporarily enable GIF to consume pending NMI */
944                 asm volatile("stgi; clgi" : : : "memory");
945                 sipi_vector = x86_handle_events(cpu_data);
946                 if (sipi_vector >= 0) {
947                         printk("CPU %d received SIPI, vector %x\n",
948                                cpu_data->cpu_id, sipi_vector);
949                         svm_vcpu_reset(cpu_data, sipi_vector);
950                         vcpu_reset(guest_regs);
951                 }
952                 iommu_check_pending_faults(cpu_data);
953                 return;
954         case VMEXIT_CPUID:
955                 /* FIXME: We are not intercepting CPUID now */
956                 return;
957         case VMEXIT_VMMCALL:
958                 vcpu_handle_hypercall(guest_regs);
959                 return;
960         case VMEXIT_CR0_SEL_WRITE:
961                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
962                 if (svm_handle_cr(guest_regs, cpu_data))
963                         return;
964                 break;
965         case VMEXIT_MSR:
966                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
967                 if (!vmcb->exitinfo1)
968                         res = vcpu_handle_msr_read(guest_regs);
969                 else
970                         res = svm_handle_msr_write(guest_regs, cpu_data);
971                 if (res)
972                         return;
973                 break;
974         case VMEXIT_NPF:
975                 if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
976                      vmcb->exitinfo2 >= XAPIC_BASE &&
977                      vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
978                         /* APIC access in non-AVIC mode */
979                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
980                         if (svm_handle_apic_access(guest_regs, cpu_data))
981                                 return;
982                 } else {
983                         /* General MMIO (IOAPIC, PCI, etc.) */
984                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
985                         svm_get_vcpu_pf_intercept(cpu_data, &pf);
986                         if (vcpu_handle_mmio_access(guest_regs, &pf))
987                                 return;
988                 }
989
990                 panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
991                              "error code is %x\n", vmcb->exitinfo2,
992                              vmcb->exitinfo1 & 0xf);
993                 break;
994         case VMEXIT_XSETBV:
995                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
996                 if ((guest_regs->rax & X86_XCR0_FP) &&
997                     (guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
998                     guest_regs->rcx == 0 && guest_regs->rdx == 0) {
999                         vcpu_skip_emulated_instruction(X86_INST_LEN_XSETBV);
1000                         asm volatile(
1001                                 "xsetbv"
1002                                 : /* no output */
1003                                 : "a" (guest_regs->rax), "c" (0), "d" (0));
1004                         return;
1005                 }
1006                 panic_printk("FATAL: Invalid xsetbv parameters: "
1007                              "xcr[%d] = %x:%x\n", guest_regs->rcx,
1008                              guest_regs->rdx, guest_regs->rax);
1009                 break;
1010         case VMEXIT_IOIO:
1011                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
1012                 if (vcpu_handle_io_access(guest_regs))
1013                         return;
1014                 break;
1015         /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
1016         default:
1017                 panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
1018                              "exitinfo1 %p exitinfo2 %p\n",
1019                              vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
1020         }
1021         dump_guest_regs(guest_regs, vmcb);
1022         panic_park();
1023 }
1024
1025 void vcpu_park(void)
1026 {
1027         svm_vcpu_reset(this_cpu_data(), APIC_BSP_PSEUDO_SIPI);
1028         /* No need to clear the VMCB clean bits: svm_vcpu_reset() already does this */
1029         this_cpu_data()->vmcb.n_cr3 = paging_hvirt2phys(parked_mode_npt);
1030
1031         vcpu_tlb_flush();
1032 }
1033
1034 void vcpu_nmi_handler(void)
1035 {
1036 }
1037
1038 void vcpu_tlb_flush(void)
1039 {
1040         struct per_cpu *cpu_data = this_cpu_data();
1041         struct vmcb *vmcb = &cpu_data->vmcb;
1042
1043         if (has_flush_by_asid)
1044                 vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
1045         else
1046                 vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
1047 }
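
/*
 * With flush-by-ASID the next VMRUN only drops TLB entries tagged with this
 * guest's ASID; without it the whole TLB, including the host's entries, is
 * flushed (see the TLB_CONTROL encodings in APMv2, Sect. 15.16).
 */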
1048
1049 const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
1050                               unsigned long pc, unsigned int *size)
1051 {
1052         struct per_cpu *cpu_data = this_cpu_data();
1053         struct vmcb *vmcb = &cpu_data->vmcb;
1054         unsigned long start;
1055
1056         if (has_assists) {
1057                 if (!*size)
1058                         return NULL;
1059                 start = vmcb->rip - pc;
1060                 if (start < vmcb->bytes_fetched) {
1061                         *size = vmcb->bytes_fetched - start;
1062                         return &vmcb->guest_bytes[start];
1063                 } else {
1064                         return NULL;
1065                 }
1066         } else {
1067                 return vcpu_map_inst(pg_structs, pc, size);
1068         }
1069 }
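
/*
 * With decode assists the CPU copies up to 15 bytes of the intercepted
 * instruction into the VMCB (guest_bytes/bytes_fetched), so emulation can
 * usually avoid mapping the guest page the instruction lives on; without
 * them we fall back to vcpu_map_inst() and walk the guest page tables.
 */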
1070
1071 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
1072                                     struct vcpu_io_bitmap *iobm)
1073 {
1074         iobm->data = cell->svm.iopm;
1075         iobm->size = sizeof(cell->svm.iopm);
1076 }
1077
1078 void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
1079 {
1080         struct per_cpu *cpu_data = this_cpu_data();
1081
1082         x_state->efer = cpu_data->vmcb.efer;
1083         x_state->rflags = cpu_data->vmcb.rflags;
1084         x_state->cs = cpu_data->vmcb.cs.selector;
1085         x_state->rip = cpu_data->vmcb.rip;
1086 }
1087
1088 /* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
1089 void enable_irq(void)
1090 {
1091         asm volatile("stgi; sti" : : : "memory");
1092 }
1093
1094 /* Jailhouse runs with GIF cleared, so we need to restore this state */
1095 void disable_irq(void)
1096 {
1097         asm volatile("cli; clgi" : : : "memory");
1098 }