1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  * Copyright (c) Valentine Sinitsyn, 2014
6  *
7  * Authors:
8  *  Jan Kiszka <jan.kiszka@siemens.com>
9  *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
10  *
11  * Based on vmx.c written by Jan Kiszka.
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  */
16
17 #include <jailhouse/entry.h>
18 #include <jailhouse/cell-config.h>
19 #include <jailhouse/control.h>
20 #include <jailhouse/paging.h>
21 #include <jailhouse/printk.h>
22 #include <jailhouse/processor.h>
23 #include <jailhouse/string.h>
24 #include <jailhouse/utils.h>
25 #include <asm/apic.h>
26 #include <asm/cell.h>
27 #include <asm/control.h>
28 #include <asm/iommu.h>
29 #include <asm/paging.h>
30 #include <asm/percpu.h>
31 #include <asm/processor.h>
32 #include <asm/svm.h>
33 #include <asm/vcpu.h>
34
35 /*
36  * The NW bit is ignored by all modern processors; however, some
37  * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
38  * Sect. 15.5). To handle this, we always keep the NW bit off.
39  */
40 #define SVM_CR0_ALLOWED_BITS    (~X86_CR0_NW)
41
42 static bool has_avic, has_assists, has_flush_by_asid;
43
44 static const struct segment invalid_seg;
45
46 static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
47
48 /* bit cleared: direct access allowed */
49 // TODO: convert to whitelist
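/*
 * Layout: two bits per MSR, the lower one intercepting RDMSR and the upper
 * one WRMSR, so each byte covers four consecutive MSRs (hence the /4
 * indices). The 2K regions cover the MSR ranges starting at 0x00000000,
 * 0xc0000000 and 0xc0010000, plus a reserved chunk (see the MSR permissions
 * map layout in APMv2).
 */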
50 static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
51         [ SVM_MSRPM_0000 ] = {
52                 [      0/4 ...  0x017/4 ] = 0,
53                 [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
54                 [  0x01c/4 ...  0x1ff/4 ] = 0,
55                 [  0x200/4 ...  0x273/4 ] = 0xaa, /* 0x200 - 0x273 (w) */
56                 [  0x274/4 ...  0x277/4 ] = 0xea, /* 0x274 - 0x276 (w), 0x277 (rw) */
57                 [  0x278/4 ...  0x2fb/4 ] = 0,
58                 [  0x2fc/4 ...  0x2ff/4 ] = 0x80, /* 0x2ff (w) */
59                 [  0x300/4 ...  0x7ff/4 ] = 0,
60                 /* x2APIC MSRs - emulated if not present */
61                 [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
62                 [  0x804/4 ...  0x807/4 ] = 0,
63                 [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
64                 [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
65                 [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
66                 [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
67                 [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
68                 [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
69                 [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
70                 [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
71                 [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
72                 [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
73                 [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
74                 [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
75                 [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
76                 [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
77                 [  0x840/4 ... 0x1fff/4 ] = 0,
78         },
79         [ SVM_MSRPM_C000 ] = {
80                 [      0/4 ...  0x07f/4 ] = 0,
81                 [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
82                 [  0x084/4 ... 0x1fff/4 ] = 0
83         },
84         [ SVM_MSRPM_C001 ] = {
85                 [      0/4 ... 0x1fff/4 ] = 0,
86         },
87         [ SVM_MSRPM_RESV ] = {
88                 [      0/4 ... 0x1fff/4 ] = 0,
89         }
90 };
91
92 /* This page is mapped so the code begins at 0x000ffff0 */
93 static u8 __attribute__((aligned(PAGE_SIZE))) parking_code[PAGE_SIZE] = {
94         [0xff0] = 0xfa, /* 1: cli */
95         [0xff1] = 0xf4, /*    hlt */
96         [0xff2] = 0xeb,
97         [0xff3] = 0xfc  /*    jmp 1b */
98 };
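/*
 * The bytes decode to "1: cli; hlt; jmp 1b". The page is mapped at
 * guest-physical 0x000ff000 (see vcpu_vendor_init), so a CPU that is parked
 * and reset to f000:fff0 spins in this loop.
 */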
99
100 static void *parked_mode_npt;
101
102 static void *avic_page;
103
104 static int svm_check_features(void)
105 {
106         /* SVM is available */
107         if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
108                 return trace_error(-ENODEV);
109
110         /* Nested paging */
111         if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
112                 return trace_error(-EIO);
113
114         /* Decode assists */
115         if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
116                 has_assists = true;
117
118         /* AVIC support */
119         if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
120                 has_avic = true;
121
122         /* TLB Flush by ASID support */
123         if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
124                 has_flush_by_asid = true;
125
126         return 0;
127 }
128
129 static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
130                                      const struct desc_table_reg *dtr)
131 {
132         svm_segment->base = dtr->base;
133         svm_segment->limit = dtr->limit & 0xffff;
134 }
135
136 static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
137                                          const struct segment *segment)
138 {
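        /*
         * Convert from the flat access-rights layout (type/S/DPL/P in bits
         * 7:0, flags in bits 15:12) to the packed 12-bit VMCB attribute
         * format that keeps the flags in bits 11:8.
         */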
139         svm_segment->selector = segment->selector;
140         svm_segment->access_rights = ((segment->access_rights & 0xf000) >> 4) |
141                 (segment->access_rights & 0x00ff);
142         svm_segment->limit = segment->limit;
143         svm_segment->base = segment->base;
144 }
145
146 static void svm_set_cell_config(struct cell *cell, struct vmcb *vmcb)
147 {
148         vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
149         vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);
150 }
151
152 static void vmcb_setup(struct per_cpu *cpu_data)
153 {
154         struct vmcb *vmcb = &cpu_data->vmcb;
155
156         memset(vmcb, 0, sizeof(struct vmcb));
157
158         vmcb->cr0 = cpu_data->linux_cr0 & SVM_CR0_ALLOWED_BITS;
159         vmcb->cr3 = cpu_data->linux_cr3;
160         vmcb->cr4 = cpu_data->linux_cr4;
161
162         set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
163         set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
164         set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
165         set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
166         set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
167         set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
168         set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
169         set_svm_segment_from_segment(&vmcb->ldtr, &invalid_seg);
170
171         set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
172         set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
173
174         vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
175
176         vmcb->rflags = 0x02;
177         /* Indicate success to the caller of arch_entry */
178         vmcb->rax = 0;
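        /*
         * linux_sp still points at the NUM_ENTRY_REGS registers saved by
         * arch_entry plus its return address; skip them so the guest resumes
         * with the stack pointer Linux had at the call site.
         */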
179         vmcb->rsp = cpu_data->linux_sp +
180                 (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
181         vmcb->rip = cpu_data->linux_ip;
182
183         vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
184         vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
185         vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
186         vmcb->star = read_msr(MSR_STAR);
187         vmcb->lstar = read_msr(MSR_LSTAR);
188         vmcb->cstar = read_msr(MSR_CSTAR);
189         vmcb->sfmask = read_msr(MSR_SFMASK);
190         vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
191
192         vmcb->dr6 = 0x00000ff0;
193         vmcb->dr7 = 0x00000400;
194
195         /* Make the hypervisor visible */
196         vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
197
198         vmcb->g_pat = cpu_data->pat;
199
200         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
201         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
202         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID;
203         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
204         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
205         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
206
207         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
208         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
209
210         vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
211
212         vmcb->np_enable = 1;
213         /* Only one guest ever runs on this CPU, so a single ASID suffices */
214         vmcb->guest_asid = 1;
215
216         /* TODO: Setup AVIC */
217
218         /* Explicitly mark all of the state as new */
219         vmcb->clean_bits = 0;
220
221         svm_set_cell_config(cpu_data->cell, vmcb);
222 }
223
224 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
225                                      unsigned long gphys,
226                                      unsigned long flags)
227 {
228         return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
229                         gphys, flags);
230 }
231
232 static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
233 {
234         /* See APMv2, Section 15.25.5 */
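        /*
         * The nested walk treats guest accesses as user-mode accesses, so
         * U/S must be set at every NPT level or the access will fault.
         */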
235         *pte = (next_pt & 0x000ffffffffff000UL) |
236                 (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
237 }
238
239 int vcpu_vendor_init(void)
240 {
241         struct paging_structures parking_pt;
242         unsigned long vm_cr;
243         int err, n;
244
245         err = svm_check_features();
246         if (err)
247                 return err;
248
249         vm_cr = read_msr(MSR_VM_CR);
250         if (vm_cr & VM_CR_SVMDIS)
251                 /* SVM disabled in BIOS */
252                 return trace_error(-EPERM);
253
254         /* Nested page tables use the same format as native 4-level paging */
255         memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
256         for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
257                 npt_paging[n].set_next_pt = npt_set_next_pt;
258
259         /* Map guest parking code (shared between cells and CPUs) */
260         parking_pt.root_paging = npt_paging;
261         parking_pt.root_table = parked_mode_npt = page_alloc(&mem_pool, 1);
262         if (!parked_mode_npt)
263                 return -ENOMEM;
264         err = paging_create(&parking_pt, paging_hvirt2phys(parking_code),
265                             PAGE_SIZE, 0x000ff000,
266                             PAGE_READONLY_FLAGS | PAGE_FLAG_US,
267                             PAGING_NON_COHERENT);
268         if (err)
269                 return err;
270
271         /* This is always false for AMD now (except in nested SVM);
272            see Sect. 16.3.1 in APMv2 */
273         if (using_x2apic) {
274                 /* allow direct x2APIC access except for ICR writes */
275                 memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
276                                 (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
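                /*
                 * 0x02 sets only the write-intercept bit for MSR 0x830 (the
                 * ICR) within the byte covering MSRs 0x830-0x833.
                 */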
277                 msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
278         } else {
279                 if (has_avic) {
280                         avic_page = page_alloc(&remap_pool, 1);
281                         if (!avic_page)
282                                 return trace_error(-ENOMEM);
283                 }
284         }
285
286         return vcpu_cell_init(&root_cell);
287 }
288
289 int vcpu_vendor_cell_init(struct cell *cell)
290 {
291         int err = -ENOMEM;
292         u64 flags;
293
294         /* allocate iopm (two 4-K pages + 3 bits) */
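        /*
         * SVM expects a contiguous 12 Kbyte I/O permissions map (one bit per
         * port for 64K ports plus a few carry-over bits), hence three pages.
         */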
295         cell->svm.iopm = page_alloc(&mem_pool, 3);
296         if (!cell->svm.iopm)
297                 return err;
298
299         /* build root NPT of cell */
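        /*
         * The NPT root reuses the root_table_page embedded in struct cell,
         * so no separate allocation is needed (and vcpu_vendor_cell_exit
         * has nothing to free besides the iopm).
         */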
300         cell->svm.npt_structs.root_paging = npt_paging;
301         cell->svm.npt_structs.root_table = (page_table_t)cell->root_table_page;
302
303         if (!has_avic) {
304                 /*
305                  * Map xAPIC as is; reads are passed, writes are trapped.
306                  */
307                 flags = PAGE_READONLY_FLAGS | PAGE_FLAG_US | PAGE_FLAG_DEVICE;
308                 err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
309                                     PAGE_SIZE, XAPIC_BASE,
310                                     flags,
311                                     PAGING_NON_COHERENT);
312         } else {
313                 flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE;
314                 err = paging_create(&cell->svm.npt_structs,
315                                     paging_hvirt2phys(avic_page),
316                                     PAGE_SIZE, XAPIC_BASE,
317                                     flags,
318                                     PAGING_NON_COHERENT);
319         }
320         if (err)
321                 goto err_free_iopm;
322
323         return 0;
324
325 err_free_iopm:
326         page_free(&mem_pool, cell->svm.iopm, 3);
327
328         return err;
329 }
330
331 int vcpu_map_memory_region(struct cell *cell,
332                            const struct jailhouse_memory *mem)
333 {
334         u64 phys_start = mem->phys_start;
335         u64 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
336
337         if (mem->flags & JAILHOUSE_MEM_READ)
338                 flags |= PAGE_FLAG_PRESENT;
339         if (mem->flags & JAILHOUSE_MEM_WRITE)
340                 flags |= PAGE_FLAG_RW;
341         if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
342                 flags |= PAGE_FLAG_NOEXECUTE;
343         if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
344                 phys_start = paging_hvirt2phys(&cell->comm_page);
345
346         return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
347                              mem->virt_start, flags, PAGING_NON_COHERENT);
348 }
349
350 int vcpu_unmap_memory_region(struct cell *cell,
351                              const struct jailhouse_memory *mem)
352 {
353         return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
354                               mem->size, PAGING_NON_COHERENT);
355 }
356
357 void vcpu_vendor_cell_exit(struct cell *cell)
358 {
359         paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
360                        PAGING_NON_COHERENT);
361         page_free(&mem_pool, cell->svm.iopm, 3);
362 }
363
364 int vcpu_init(struct per_cpu *cpu_data)
365 {
366         unsigned long efer;
367         int err;
368
369         err = svm_check_features();
370         if (err)
371                 return err;
372
373         efer = read_msr(MSR_EFER);
374         if (efer & EFER_SVME)
375                 return trace_error(-EBUSY);
376
377         efer |= EFER_SVME;
378         write_msr(MSR_EFER, efer);
379
380         cpu_data->svm_state = SVMON;
381
382         vmcb_setup(cpu_data);
383
384         /*
385          * APM Volume 2, 3.1.1: "When writing the CR0 register, software should
386          * set the values of reserved bits to the values found during the
387          * previous CR0 read."
388          * But we want to avoid surprises with new features unknown to us but
389  * set by Linux. So check if any assumed reserved bit was set and bail
390          * out if so.
391          * Note that the APM defines all reserved CR4 bits as must-be-zero.
392          */
393         if (cpu_data->linux_cr0 & X86_CR0_RESERVED)
394                 return -EIO;
395
396         /* bring CR0 and CR4 into well-defined states */
397         write_cr0(X86_CR0_HOST_STATE);
398         write_cr4(X86_CR4_HOST_STATE);
399
400         write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
401
402         return 0;
403 }
404
405 void vcpu_exit(struct per_cpu *cpu_data)
406 {
407         unsigned long efer;
408
409         if (cpu_data->svm_state == SVMOFF)
410                 return;
411
412         cpu_data->svm_state = SVMOFF;
413
414         /* We are leaving - set the GIF */
415         asm volatile ("stgi" : : : "memory");
416
417         efer = read_msr(MSR_EFER);
418         efer &= ~EFER_SVME;
419         write_msr(MSR_EFER, efer);
420
421         write_msr(MSR_VM_HSAVE_PA, 0);
422 }
423
424 void __attribute__((noreturn)) vcpu_activate_vmm(struct per_cpu *cpu_data)
425 {
426         unsigned long vmcb_pa, host_stack;
427
428         vmcb_pa = paging_hvirt2phys(&cpu_data->vmcb);
429         host_stack = (unsigned long)cpu_data->stack + sizeof(cpu_data->stack);
430
431         /* We enter Linux at the point arch_entry would return to as well.
432          * rax is cleared to signal success to the caller. */
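        /*
         * linux_reg holds the callee-saved registers that arch_entry saved;
         * restoring them lets the call appear to return normally to Linux.
         * vmload pulls the remaining guest state (FS/GS bases, TR, LDTR,
         * SYSENTER and syscall MSRs, KernelGSbase) from the VMCB before
         * svm_vmentry issues vmrun.
         */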
433         asm volatile(
434                 "clgi\n\t"
435                 "mov (%%rdi),%%r15\n\t"
436                 "mov 0x8(%%rdi),%%r14\n\t"
437                 "mov 0x10(%%rdi),%%r13\n\t"
438                 "mov 0x18(%%rdi),%%r12\n\t"
439                 "mov 0x20(%%rdi),%%rbx\n\t"
440                 "mov 0x28(%%rdi),%%rbp\n\t"
441                 "mov %2,%%rsp\n\t"
442                 "vmload %%rax\n\t"
443                 "jmp svm_vmentry"
444                 : /* no output */
445                 : "D" (cpu_data->linux_reg), "a" (vmcb_pa), "m" (host_stack));
446         __builtin_unreachable();
447 }
448
449 void __attribute__((noreturn)) vcpu_deactivate_vmm(void)
450 {
451         struct per_cpu *cpu_data = this_cpu_data();
452         struct vmcb *vmcb = &cpu_data->vmcb;
453         unsigned long *stack = (unsigned long *)vmcb->rsp;
454         unsigned long linux_ip = vmcb->rip;
455
456         cpu_data->linux_cr0 = vmcb->cr0;
457         cpu_data->linux_cr3 = vmcb->cr3;
458
459         cpu_data->linux_gdtr.base = vmcb->gdtr.base;
460         cpu_data->linux_gdtr.limit = vmcb->gdtr.limit;
461         cpu_data->linux_idtr.base = vmcb->idtr.base;
462         cpu_data->linux_idtr.limit = vmcb->idtr.limit;
463
464         cpu_data->linux_cs.selector = vmcb->cs.selector;
465
466         asm volatile("str %0" : "=m" (cpu_data->linux_tss.selector));
467
468         cpu_data->linux_efer = vmcb->efer & (~EFER_SVME);
469         cpu_data->linux_fs.base = vmcb->fs.base;
470         cpu_data->linux_gs.base = vmcb->gs.base;
471
472         cpu_data->linux_ds.selector = vmcb->ds.selector;
473         cpu_data->linux_es.selector = vmcb->es.selector;
474
475         asm volatile("mov %%fs,%0" : "=m" (cpu_data->linux_fs.selector));
476         asm volatile("mov %%gs,%0" : "=m" (cpu_data->linux_gs.selector));
477
478         arch_cpu_restore(cpu_data, 0);
479
480         stack--;
481         *stack = linux_ip;
482
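        /*
         * The pop sequence below mirrors the layout of union registers
         * (index 0 = r15 ... index 15 = rax, cf. svm_handle_cr): the
         * add $8 skips the unused rsp slot, and rax is not popped since it
         * is zeroed to report success. The final ret lands on linux_ip,
         * which was just pushed onto the restored Linux stack.
         */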
483         asm volatile (
484                 "mov %%rbx,%%rsp\n\t"
485                 "pop %%r15\n\t"
486                 "pop %%r14\n\t"
487                 "pop %%r13\n\t"
488                 "pop %%r12\n\t"
489                 "pop %%r11\n\t"
490                 "pop %%r10\n\t"
491                 "pop %%r9\n\t"
492                 "pop %%r8\n\t"
493                 "pop %%rdi\n\t"
494                 "pop %%rsi\n\t"
495                 "pop %%rbp\n\t"
496                 "add $8,%%rsp\n\t"
497                 "pop %%rbx\n\t"
498                 "pop %%rdx\n\t"
499                 "pop %%rcx\n\t"
500                 "mov %%rax,%%rsp\n\t"
501                 "xor %%rax,%%rax\n\t"
502                 "ret"
503                 : : "a" (stack), "b" (&cpu_data->guest_regs));
504         __builtin_unreachable();
505 }
506
507 static void svm_vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
508 {
509         static const struct svm_segment dataseg_reset_state = {
510                 .selector = 0,
511                 .base = 0,
512                 .limit = 0xffff,
513                 .access_rights = 0x0093,
514         };
515         static const struct svm_segment dtr_reset_state = {
516                 .selector = 0,
517                 .base = 0,
518                 .limit = 0xffff,
519                 .access_rights = 0,
520         };
521         struct vmcb *vmcb = &cpu_data->vmcb;
522         unsigned long val;
523
524         vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
525         vmcb->cr3 = 0;
526         vmcb->cr4 = 0;
527
528         vmcb->rflags = 0x02;
529
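        /*
         * A BSP pseudo-SIPI resets the vCPU to the architectural reset
         * vector: CS 0xf000 with base 0xf0000 and IP 0xfff0, i.e. linear
         * 0xffff0. Ordinary SIPIs enter at vector << 12 in real mode.
         */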
530         val = 0;
531         if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
532                 val = 0xfff0;
533                 sipi_vector = 0xf0;
534         }
535         vmcb->rip = val;
536         vmcb->rsp = 0;
537
538         vmcb->cs.selector = sipi_vector << 8;
539         vmcb->cs.base = sipi_vector << 12;
540         vmcb->cs.limit = 0xffff;
541         vmcb->cs.access_rights = 0x009b;
542
543         vmcb->ds = dataseg_reset_state;
544         vmcb->es = dataseg_reset_state;
545         vmcb->fs = dataseg_reset_state;
546         vmcb->gs = dataseg_reset_state;
547         vmcb->ss = dataseg_reset_state;
548
549         vmcb->tr.selector = 0;
550         vmcb->tr.base = 0;
551         vmcb->tr.limit = 0xffff;
552         vmcb->tr.access_rights = 0x008b;
553
554         vmcb->ldtr.selector = 0;
555         vmcb->ldtr.base = 0;
556         vmcb->ldtr.limit = 0xffff;
557         vmcb->ldtr.access_rights = 0x0082;
558
559         vmcb->gdtr = dtr_reset_state;
560         vmcb->idtr = dtr_reset_state;
561
562         vmcb->efer = EFER_SVME;
563
564         /* These MSRs are undefined on reset */
565         vmcb->star = 0;
566         vmcb->lstar = 0;
567         vmcb->cstar = 0;
568         vmcb->sfmask = 0;
569         vmcb->sysenter_cs = 0;
570         vmcb->sysenter_eip = 0;
571         vmcb->sysenter_esp = 0;
572         vmcb->kerngsbase = 0;
573
574         vmcb->dr7 = 0x00000400;
575
576         /* Almost all of the guest state changed */
577         vmcb->clean_bits = 0;
578
579         svm_set_cell_config(cpu_data->cell, vmcb);
580
581         asm volatile(
582                 "vmload %%rax"
583                 : : "a" (paging_hvirt2phys(vmcb)) : "memory");
584         /* vmload overwrites GS_BASE - restore the host state */
585         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
586 }
587
588 void vcpu_skip_emulated_instruction(unsigned int inst_len)
589 {
590         this_cpu_data()->vmcb.rip += inst_len;
591 }
592
593 static void update_efer(struct vmcb *vmcb)
594 {
595         unsigned long efer = vmcb->efer;
596
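        /*
         * Called when an emulated CR0 write enables (or keeps) paging. On
         * real hardware, activating CR0.PG while EFER.LME is set makes the
         * CPU set EFER.LMA, so mirror that here.
         */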
597         if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
598                 return;
599
600         efer |= EFER_LMA;
601
602         /* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
603         if ((vmcb->efer ^ efer) & EFER_LMA)
604                 vcpu_tlb_flush();
605
606         vmcb->efer = efer;
607         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
608 }
609
610 bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
611 {
612         struct vmcb *vmcb = &this_cpu_data()->vmcb;
613
614         if (vmcb->efer & EFER_LMA) {
615                 pg_structs->root_paging = x86_64_paging;
616                 pg_structs->root_table_gphys =
617                         vmcb->cr3 & 0x000ffffffffff000UL;
618         } else if ((vmcb->cr0 & X86_CR0_PG) &&
619                    !(vmcb->cr4 & X86_CR4_PAE)) {
620                 pg_structs->root_paging = i386_paging;
621                 pg_structs->root_table_gphys =
622                         vmcb->cr3 & 0xfffff000UL;
623         } else if (!(vmcb->cr0 & X86_CR0_PG)) {
624                 /*
625                  * Can be in non-paged protected mode as well, but
626                  * the translation mechanism will stay the same anyway.
627                  */
628                 pg_structs->root_paging = realmode_paging;
629                 /*
630                  * This will make paging_get_guest_pages map the page
631                  * that also contains the bootstrap code and, thus, is
632                  * always present in a cell.
633                  */
634                 pg_structs->root_table_gphys = 0xff000;
635         } else {
636                 printk("FATAL: Unsupported paging mode\n");
637                 return false;
638         }
639         return true;
640 }
641
642 void vcpu_vendor_set_guest_pat(unsigned long val)
643 {
644         struct vmcb *vmcb = &this_cpu_data()->vmcb;
645
646         vmcb->g_pat = val;
647         vmcb->clean_bits &= ~CLEAN_BITS_NP;
648 }
649
650 struct parse_context {
651         unsigned int remaining;
652         unsigned int size;
653         unsigned long cs_base;
654         const u8 *inst;
655 };
656
657 static bool ctx_advance(struct parse_context *ctx,
658                         unsigned long *pc,
659                         struct guest_paging_structures *pg_structs)
660 {
661         if (!ctx->size) {
662                 ctx->size = ctx->remaining;
663                 ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
664                                           &ctx->size);
665                 if (!ctx->inst)
666                         return false;
667                 ctx->remaining -= ctx->size;
668                 *pc += ctx->size;
669         }
670         return true;
671 }
672
673 static bool svm_parse_mov_to_cr(struct vmcb *vmcb, unsigned long pc,
674                                 unsigned char reg, unsigned long *gpr)
675 {
676         struct guest_paging_structures pg_structs;
677         struct parse_context ctx = {};
678         /* No prefixes are supported yet */
679         u8 opcodes[] = {0x0f, 0x22}, modrm;
680         int n;
681
682         ctx.remaining = ARRAY_SIZE(opcodes);
683         if (!vcpu_get_guest_paging_structs(&pg_structs))
684                 return false;
685         ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;
686
687         if (!ctx_advance(&ctx, &pc, &pg_structs))
688                 return false;
689
690         for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++)
691                 if (*(ctx.inst) != opcodes[n] ||
692                     !ctx_advance(&ctx, &pc, &pg_structs))
693                         return false;
694
695         if (!ctx_advance(&ctx, &pc, &pg_structs))
696                 return false;
697
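        /*
         * ModRM byte of MOV to CRn: the reg field (bits 5:3) encodes the
         * control register, the r/m field (bits 2:0) the source GPR.
         */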
698         modrm = *(ctx.inst);
699
700         if (((modrm & 0x38) >> 3) != reg)
701                 return false;
702
703         if (gpr)
704                 *gpr = (modrm & 0x7);
705
706         return true;
707 }
708
709 /*
710  * XXX: The only visible reason to have this function (vmx.c consistency
711  * aside) is to prevent cells from setting invalid CD+NW combinations that
712  * result in no more than VMEXIT_INVALID. Maybe we can get along without it
713  * altogether?
714  */
715 static bool svm_handle_cr(struct per_cpu *cpu_data)
716 {
717         struct vmcb *vmcb = &cpu_data->vmcb;
718         /* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
719         unsigned long reg = -1, val, bits;
720
721         if (has_assists) {
722                 if (!(vmcb->exitinfo1 & (1UL << 63))) {
723                         panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
724                         return false;
725                 }
726                 reg = vmcb->exitinfo1 & 0x07;
727         } else {
728                 if (!svm_parse_mov_to_cr(vmcb, vmcb->rip, 0, &reg)) {
729                         panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
730                         return false;
731                 }
732         }
733
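        /*
         * GPR numbers in the decode use the usual encoding (rax = 0, rcx = 1,
         * ...), while union registers stores r15 at index 0 and rax at index
         * 15, hence the 15 - reg index below. rsp (GPR 4) is not part of
         * guest_regs and is taken from the VMCB instead.
         */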
734         if (reg == 4)
735                 val = vmcb->rsp;
736         else
737                 val = cpu_data->guest_regs.by_index[15 - reg];
738
739         vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
740         /* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
741         bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
742         if ((val ^ vmcb->cr0) & bits)
743                 vcpu_tlb_flush();
744         /* TODO: better check for #GP reasons */
745         vmcb->cr0 = val & SVM_CR0_ALLOWED_BITS;
746         if (val & X86_CR0_PG)
747                 update_efer(vmcb);
748         vmcb->clean_bits &= ~CLEAN_BITS_CRX;
749
750         return true;
751 }
752
753 static bool svm_handle_msr_write(struct per_cpu *cpu_data)
754 {
755         struct vmcb *vmcb = &cpu_data->vmcb;
756         unsigned long efer;
757
758         if (cpu_data->guest_regs.rcx == MSR_EFER) {
759                 /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
760                 efer = get_wrmsr_value(&cpu_data->guest_regs) | EFER_SVME;
761                 /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
762                 if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
763                         vcpu_tlb_flush();
764                 vmcb->efer = efer;
765                 vmcb->clean_bits &= ~CLEAN_BITS_CRX;
766                 vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
767                 return true;
768         }
769
770         return vcpu_handle_msr_write();
771 }
772
773 /*
774  * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
775  * be treated separately in svm_handle_avic_access().
776  */
777 static bool svm_handle_apic_access(struct vmcb *vmcb)
778 {
779         struct guest_paging_structures pg_structs;
780         unsigned int inst_len, offset;
781         bool is_write;
782
783         /* The caller is responsible for sanity checks */
784         is_write = !!(vmcb->exitinfo1 & 0x2);
785         offset = vmcb->exitinfo2 - XAPIC_BASE;
786
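        /*
         * xAPIC registers sit on 16-byte boundaries; only aligned accesses
         * are handled here, and offset >> 4 below is the register index.
         */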
787         if (offset & 0x00f)
788                 goto out_err;
789
790         if (!vcpu_get_guest_paging_structs(&pg_structs))
791                 goto out_err;
792
793         inst_len = apic_mmio_access(vmcb->rip, &pg_structs, offset >> 4,
794                                     is_write);
795         if (!inst_len)
796                 goto out_err;
797
798         vcpu_skip_emulated_instruction(inst_len);
799         return true;
800
801 out_err:
802         panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
803                      offset, is_write);
804         return false;
805 }
806
807 static void dump_guest_regs(union registers *guest_regs, struct vmcb *vmcb)
808 {
809         panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
810                      vmcb->rsp, vmcb->rflags);
811         panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
812                      guest_regs->rbx, guest_regs->rcx);
813         panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
814                      guest_regs->rsi, guest_regs->rdi);
815         panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
816                      vmcb->cs.selector, vmcb->cs.base, vmcb->cs.access_rights,
817                      !!(vmcb->efer & EFER_LMA));
818         panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
819                      vmcb->cr3, vmcb->cr4);
820         panic_printk("EFER: %p\n", vmcb->efer);
821 }
822
823 void vcpu_vendor_get_io_intercept(struct vcpu_io_intercept *io)
824 {
825         struct vmcb *vmcb = &this_cpu_data()->vmcb;
826         u64 exitinfo = vmcb->exitinfo1;
827
828         /* parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
829         io->port = (exitinfo >> 16) & 0xFFFF;
830         io->size = (exitinfo >> 4) & 0x7;
831         io->in = !!(exitinfo & 0x1);
832         io->inst_len = vmcb->exitinfo2 - vmcb->rip;
833         io->rep_or_str = !!(exitinfo & 0x0c);
834 }
835
836 void vcpu_vendor_get_mmio_intercept(struct vcpu_mmio_intercept *mmio)
837 {
838         struct vmcb *vmcb = &this_cpu_data()->vmcb;
839
840         mmio->phys_addr = vmcb->exitinfo2;
841         mmio->is_write = !!(vmcb->exitinfo1 & 0x2);
842 }
843
844 void vcpu_handle_exit(struct per_cpu *cpu_data)
845 {
846         struct vmcb *vmcb = &cpu_data->vmcb;
847         bool res = false;
848         int sipi_vector;
849
850         vmcb->gs.base = read_msr(MSR_GS_BASE);
851
852         /* Restore GS value expected by per_cpu data accessors */
853         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
854
855         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
856         /*
857          * All guest state is marked unmodified; individual handlers must clear
858          * the bits as needed.
859          */
860         vmcb->clean_bits = 0xffffffff;
861
862         switch (vmcb->exitcode) {
863         case VMEXIT_INVALID:
864                 panic_printk("FATAL: VM-Entry failure, error %d\n",
865                              vmcb->exitcode);
866                 break;
867         case VMEXIT_NMI:
868                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
869                 /* Temporarily enable GIF to consume pending NMI */
870                 asm volatile("stgi; clgi" : : : "memory");
871                 sipi_vector = x86_handle_events(cpu_data);
872                 if (sipi_vector >= 0) {
873                         printk("CPU %d received SIPI, vector %x\n",
874                                cpu_data->cpu_id, sipi_vector);
875                         svm_vcpu_reset(cpu_data, sipi_vector);
876                         vcpu_reset(sipi_vector == APIC_BSP_PSEUDO_SIPI);
877                 }
878                 iommu_check_pending_faults();
879                 goto vmentry;
880         case VMEXIT_VMMCALL:
881                 vcpu_handle_hypercall();
882                 goto vmentry;
883         case VMEXIT_CR0_SEL_WRITE:
884                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
885                 if (svm_handle_cr(cpu_data))
886                         goto vmentry;
887                 break;
888         case VMEXIT_CPUID:
889                 vcpu_handle_cpuid();
890                 goto vmentry;
891         case VMEXIT_MSR:
892                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
893                 if (!vmcb->exitinfo1)
894                         res = vcpu_handle_msr_read();
895                 else
896                         res = svm_handle_msr_write(cpu_data);
897                 if (res)
898                         goto vmentry;
899                 break;
900         case VMEXIT_NPF:
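                /*
                 * The xAPIC page is mapped read-only in non-AVIC mode, so
                 * guest writes to it fault with P, RW and US set (0x7).
                 */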
901                 if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
902                      vmcb->exitinfo2 >= XAPIC_BASE &&
903                      vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
904                         /* APIC access in non-AVIC mode */
905                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
906                         if (svm_handle_apic_access(vmcb))
907                                 goto vmentry;
908                 } else {
909                         /* General MMIO (IOAPIC, PCI etc) */
910                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
911                         if (vcpu_handle_mmio_access())
912                                 goto vmentry;
913                 }
914
915                 panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
916                              "error code is %x\n", vmcb->exitinfo2,
917                              vmcb->exitinfo1 & 0xf);
918                 break;
919         case VMEXIT_XSETBV:
920                 if (vcpu_handle_xsetbv())
921                         goto vmentry;
922                 break;
923         case VMEXIT_IOIO:
924                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
925                 if (vcpu_handle_io_access())
926                         goto vmentry;
927                 break;
928         /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
929         default:
930                 panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
931                              "exitinfo1 %p exitinfo2 %p\n",
932                              vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
933         }
934         dump_guest_regs(&cpu_data->guest_regs, vmcb);
935         panic_park();
936
937 vmentry:
938         write_msr(MSR_GS_BASE, vmcb->gs.base);
939 }
940
941 void vcpu_park(void)
942 {
943         svm_vcpu_reset(this_cpu_data(), APIC_BSP_PSEUDO_SIPI);
944         /* No need to clear the VMCB clean bits: svm_vcpu_reset() already does this */
945         this_cpu_data()->vmcb.n_cr3 = paging_hvirt2phys(parked_mode_npt);
946
947         vcpu_tlb_flush();
948 }
949
950 void vcpu_nmi_handler(void)
951 {
952 }
953
954 void vcpu_tlb_flush(void)
955 {
956         struct vmcb *vmcb = &this_cpu_data()->vmcb;
957
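        /*
         * The tlb_control field takes effect at the next VMRUN; with
         * flush-by-ASID only this guest's entries are invalidated, otherwise
         * the entire TLB is flushed.
         */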
958         if (has_flush_by_asid)
959                 vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
960         else
961                 vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
962 }
963
964 const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
965                               unsigned long pc, unsigned int *size)
966 {
967         struct vmcb *vmcb = &this_cpu_data()->vmcb;
968         unsigned long start;
969
970         if (has_assists) {
971                 if (!*size)
972                         return NULL;
973                 start = vmcb->rip - pc;
974                 if (start < vmcb->bytes_fetched) {
975                         *size = vmcb->bytes_fetched - start;
976                         return &vmcb->guest_bytes[start];
977                 } else {
978                         return NULL;
979                 }
980         } else {
981                 return vcpu_map_inst(pg_structs, pc, size);
982         }
983 }
984
985 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
986                                     struct vcpu_io_bitmap *iobm)
987 {
988         iobm->data = cell->svm.iopm;
989         iobm->size = sizeof(cell->svm.iopm);
990 }
991
992 void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
993 {
994         struct vmcb *vmcb = &this_cpu_data()->vmcb;
995
996         x_state->efer = vmcb->efer;
997         x_state->rflags = vmcb->rflags;
998         x_state->cs = vmcb->cs.selector;
999         x_state->rip = vmcb->rip;
1000 }
1001
1002 /* GIF must be set for interrupts to be delivered (APMv2, Sect. 15.17) */
1003 void enable_irq(void)
1004 {
1005         asm volatile("stgi; sti" : : : "memory");
1006 }
1007
1008 /* Jailhouse runs with GIF cleared, so we need to restore this state */
1009 void disable_irq(void)
1010 {
1011         asm volatile("cli; clgi" : : : "memory");
1012 }