/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <jailhouse/utils.h>
#include <asm/apic.h>
#include <asm/cell.h>
#include <asm/control.h>
#include <asm/iommu.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * The NW bit is ignored by all modern processors; however, some
 * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_CLEARED_BITS	~X86_CR0_NW

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];

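/*
 * Layout of the MSR permissions map (see the MSR intercept description in
 * APMv2): each byte covers four consecutive MSRs, with two bits per MSR.
 * The lower-order bit of each pair intercepts RDMSR, the higher-order bit
 * intercepts WRMSR. The per-range comments below spell out which accesses
 * each byte value traps.
 */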
static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
	[ SVM_MSRPM_0000 ] = {
		[      0/4 ...  0x017/4 ] = 0,
		[  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
		[  0x01c/4 ...  0x7ff/4 ] = 0,
		/* x2APIC MSRs - emulated if not present */
		[  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
		[  0x804/4 ...  0x807/4 ] = 0,
		[  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
		[  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
		[  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
		[  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
		[  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
		[  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
		[  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
		[  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
		[  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
		[  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
		[  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
		[  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
		[  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
		[  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
		[  0x840/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_C000 ] = {
		[      0/4 ...  0x07f/4 ] = 0,
		[  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
		[  0x084/4 ... 0x1fff/4 ] = 0
	},
	[ SVM_MSRPM_C001 ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	},
	[ SVM_MSRPM_RESV ] = {
		[      0/4 ... 0x1fff/4 ] = 0,
	}
};

static void *avic_page;

static int svm_check_features(void)
{
	/* SVM is available */
	if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
		return -ENODEV;

	/* Nested paging */
	if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
		return -EIO;

	/* Decode assists */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS)
		has_assists = true;

	/* AVIC support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
		has_avic = true;

	/* TLB Flush by ASID support */
	if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
		has_flush_by_asid = true;

	return 0;
}

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
				     const struct desc_table_reg *dtr)
{
	struct svm_segment tmp = { 0 };

	if (dtr) {
		tmp.base = dtr->base;
		tmp.limit = dtr->limit & 0xffff;
	}

	*svm_segment = tmp;
}

/* TODO: struct segment needs to be x86 generic, not VMX-specific one here */
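/*
 * struct segment carries VMX-style access rights: attribute bits in
 * positions 0-7 and 12-15, with 0x10000 marking an unusable segment.
 * The VMCB expects the same attributes packed into 12 contiguous bits,
 * hence the shift below.
 */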
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
					 const struct segment *segment)
{
	u32 ar;

	svm_segment->selector = segment->selector;

	if (segment->access_rights == 0x10000) {
		svm_segment->access_rights = 0;
	} else {
		ar = segment->access_rights;
		svm_segment->access_rights =
			((ar & 0xf000) >> 4) | (ar & 0x00ff);
	}

	svm_segment->limit = segment->limit;
	svm_segment->base = segment->base;
}

static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
	/* No real need for this function; used for consistency with vmx.c */
	vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
	vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

	return true;
}

static int vmcb_setup(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	memset(vmcb, 0, sizeof(struct vmcb));

	vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
	vmcb->cr3 = cpu_data->linux_cr3;
	vmcb->cr4 = read_cr4();

	set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
	set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
	set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
	set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
	set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
	set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
	set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

	set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
	set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
	set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

	vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

	vmcb->rflags = 0x02;
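	/*
	 * The entry code saved NUM_ENTRY_REGS general-purpose registers plus
	 * a return address on the Linux stack; skip them to recover the
	 * stack pointer the kernel had before calling the hypervisor.
	 */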
	vmcb->rsp = cpu_data->linux_sp +
		(NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
	vmcb->rip = cpu_data->linux_ip;

	vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
	vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
	vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
	vmcb->star = read_msr(MSR_STAR);
	vmcb->lstar = read_msr(MSR_LSTAR);
	vmcb->cstar = read_msr(MSR_CSTAR);
	vmcb->sfmask = read_msr(MSR_SFMASK);
	vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

	vmcb->dr6 = 0x00000ff0;
	vmcb->dr7 = 0x00000400;

	/* Make the hypervisor visible */
	vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

	/* Linux uses custom PAT setting */
	vmcb->g_pat = read_msr(MSR_IA32_PAT);

	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
	/* TODO: Do we need this for SVM? */
	/* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
	vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
	vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

	vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

	vmcb->np_enable = 1;
	/* No more than one guest owns the CPU */
	vmcb->guest_asid = 1;

	/* TODO: Setup AVIC */

	return vcpu_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
				     unsigned long gphys,
				     unsigned long flags)
{
	return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
			gphys, flags);
}

static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
	/* See APMv2, Section 15.25.5 */
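	/*
	 * Nested page table walks treat all guest accesses as user-mode
	 * accesses, so the US bit has to be set at every level of the NPT.
	 */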
	*pte = (next_pt & 0x000ffffffffff000UL) |
		(PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}

int vcpu_vendor_init(void)
{
	unsigned long vm_cr;
	int err, n;

	err = svm_check_features();
	if (err)
		return err;

	vm_cr = read_msr(MSR_VM_CR);
	if (vm_cr & VM_CR_SVMDIS)
		/* SVM disabled in BIOS */
		return -EPERM;

	/* Nested paging structures use the same format as the native ones */
	memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
	for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
		npt_paging[n].set_next_pt = npt_set_next_pt;

	/* This is always false for AMD now (except in nested SVM);
	   see Sect. 16.3.1 in APMv2 */
	if (using_x2apic) {
		/* allow direct x2APIC access except for ICR writes */
		memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
				(MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
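		/* 0x02 = intercept only writes to the first MSR of this
		   byte group, i.e. the ICR (0x830) */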
		msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
	} else {
		/* Enable Extended Interrupt LVT */
		apic_reserved_bits[0x50] = 0;
		if (has_avic) {
			avic_page = page_alloc(&remap_pool, 1);
			if (!avic_page)
				return -ENOMEM;
		}
	}

	return vcpu_cell_init(&root_cell);
}

int vcpu_vendor_cell_init(struct cell *cell)
{
	u64 flags;
	int err;

	/* allocate iopm (two 4-K pages of bitmap + 3 bits, i.e. 3 pages) */
	cell->svm.iopm = page_alloc(&mem_pool, 3);
	if (!cell->svm.iopm)
		return -ENOMEM;

	/* build root NPT of cell */
	cell->svm.npt_structs.root_paging = npt_paging;
	cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
	if (!cell->svm.npt_structs.root_table)
		return -ENOMEM;

	if (!has_avic) {
		/*
		 * Map xAPIC as is; reads are passed, writes are trapped.
		 */
		flags = PAGE_READONLY_FLAGS |
			PAGE_FLAG_US |
			PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	} else {
		flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
		err = paging_create(&cell->svm.npt_structs,
				    paging_hvirt2phys(avic_page),
				    PAGE_SIZE, XAPIC_BASE,
				    flags,
				    PAGING_NON_COHERENT);
	}

	return err;
}

int vcpu_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	u64 phys_start = mem->phys_start;
	u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= PAGE_FLAG_PRESENT;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= PAGE_FLAG_RW;
	if (mem->flags & JAILHOUSE_MEM_EXECUTE)
		flags |= PAGE_FLAG_EXECUTE;
	if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
		phys_start = paging_hvirt2phys(&cell->comm_page);

	return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
			     mem->virt_start, flags, PAGING_NON_COHERENT);
}

int vcpu_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
			      mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
	paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
		       PAGING_NON_COHERENT);
	page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

int vcpu_init(struct per_cpu *cpu_data)
{
	unsigned long efer;
	int err;

	err = svm_check_features();
	if (err)
		return err;

	efer = read_msr(MSR_EFER);
	if (efer & EFER_SVME)
		return -EBUSY;

	efer |= EFER_SVME;
	write_msr(MSR_EFER, efer);

	cpu_data->svm_state = SVMON;

	if (!vmcb_setup(cpu_data))
		return -EIO;

	write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

	/* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
	if (!using_x2apic)
		apic_reserved_bits[0x50] = 0;

	return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
	unsigned long efer;

	if (cpu_data->svm_state == SVMOFF)
		return;

	cpu_data->svm_state = SVMOFF;

	efer = read_msr(MSR_EFER);
	efer &= ~EFER_SVME;
	write_msr(MSR_EFER, efer);

	write_msr(MSR_VM_HSAVE_PA, 0);
}

void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
	/* TODO: Implement */
	__builtin_unreachable();
}

static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long val;
	bool ok = true;

	vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
	vmcb->cr3 = 0;
	vmcb->cr4 = 0;

	vmcb->rflags = 0x02;

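	/*
	 * A regular SIPI lands the AP at VV00:0000 (vector VV). The BSP
	 * pseudo-SIPI instead places the vCPU at the traditional reset
	 * entry point F000:FFF0.
	 */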
	val = 0;
	if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
		val = 0xfff0;
		sipi_vector = 0xf0;
	}
	vmcb->rip = val;
	vmcb->rsp = 0;

	vmcb->cs.selector = sipi_vector << 8;
	vmcb->cs.base = sipi_vector << 12;
	vmcb->cs.limit = 0xffff;
	vmcb->cs.access_rights = 0x009b;

	vmcb->ds.selector = 0;
	vmcb->ds.base = 0;
	vmcb->ds.limit = 0xffff;
	vmcb->ds.access_rights = 0x0093;

	vmcb->es.selector = 0;
	vmcb->es.base = 0;
	vmcb->es.limit = 0xffff;
	vmcb->es.access_rights = 0x0093;

	vmcb->fs.selector = 0;
	vmcb->fs.base = 0;
	vmcb->fs.limit = 0xffff;
	vmcb->fs.access_rights = 0x0093;

	vmcb->gs.selector = 0;
	vmcb->gs.base = 0;
	vmcb->gs.limit = 0xffff;
	vmcb->gs.access_rights = 0x0093;

	vmcb->ss.selector = 0;
	vmcb->ss.base = 0;
	vmcb->ss.limit = 0xffff;
	vmcb->ss.access_rights = 0x0093;

	vmcb->tr.selector = 0;
	vmcb->tr.base = 0;
	vmcb->tr.limit = 0xffff;
	vmcb->tr.access_rights = 0x008b;

	vmcb->ldtr.selector = 0;
	vmcb->ldtr.base = 0;
	vmcb->ldtr.limit = 0xffff;
	vmcb->ldtr.access_rights = 0x0082;

	vmcb->gdtr.selector = 0;
	vmcb->gdtr.base = 0;
	vmcb->gdtr.limit = 0xffff;
	vmcb->gdtr.access_rights = 0;

	vmcb->idtr.selector = 0;
	vmcb->idtr.base = 0;
	vmcb->idtr.limit = 0xffff;
	vmcb->idtr.access_rights = 0;

	vmcb->efer = EFER_SVME;

	/* These MSRs are undefined on reset */
	vmcb->star = 0;
	vmcb->lstar = 0;
	vmcb->cstar = 0;
	vmcb->sfmask = 0;
	vmcb->sysenter_cs = 0;
	vmcb->sysenter_eip = 0;
	vmcb->sysenter_esp = 0;
	vmcb->kerngsbase = 0;

	vmcb->g_pat = 0x0007040600070406;

	vmcb->dr7 = 0x00000400;

	ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);

	/* This is always false, but to be consistent with vmx.c... */
	if (!ok) {
		panic_printk("FATAL: CPU reset failed\n");
		panic_stop();
	}
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	vmcb->rip += inst_len;
}

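/*
 * Called after an emulated CR0 write enables paging: because the mode
 * switch was not performed by hardware, EFER.LMA has to be set manually
 * once EFER.LME is set and paging becomes active.
 */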
static void update_efer(struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer = vmcb->efer;

	if ((efer & (EFER_LME | EFER_LMA)) != EFER_LME)
		return;

	efer |= EFER_LMA;

	/* Flush TLB on LMA/LME change: See APMv2, Sect. 15.16 */
	if ((vmcb->efer ^ efer) & EFER_LMA)
		vcpu_tlb_flush();

	vmcb->efer = efer;
}

bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (vmcb->efer & EFER_LMA) {
		pg_structs->root_paging = x86_64_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0x000ffffffffff000UL;
	} else if ((vmcb->cr0 & X86_CR0_PG) &&
		   !(vmcb->cr4 & X86_CR4_PAE)) {
		pg_structs->root_paging = i386_paging;
		pg_structs->root_table_gphys =
			vmcb->cr3 & 0xfffff000UL;
	} else if (!(vmcb->cr0 & X86_CR0_PG)) {
		/*
		 * Can be in non-paged protected mode as well, but
		 * the translation mechanism will stay the same anyway.
		 */
		pg_structs->root_paging = realmode_paging;
		/*
		 * This will make paging_get_guest_pages map the page
		 * that also contains the bootstrap code and, thus, is
		 * always present in a cell.
		 */
		pg_structs->root_table_gphys = 0xff000;
	} else {
		printk("FATAL: Unsupported paging mode\n");
		return false;
	}
	return true;
}

struct parse_context {
	unsigned int remaining;
	unsigned int size;
	unsigned long cs_base;
	const u8 *inst;
};

static bool ctx_advance(struct parse_context *ctx,
			unsigned long *pc,
			struct guest_paging_structures *pg_structs)
{
	if (!ctx->size) {
		ctx->size = ctx->remaining;
		ctx->inst = vcpu_map_inst(pg_structs, ctx->cs_base + *pc,
					  &ctx->size);
		if (!ctx->inst)
			return false;
		ctx->remaining -= ctx->size;
		*pc += ctx->size;
	}
	return true;
}

static bool x86_parse_mov_to_cr(struct per_cpu *cpu_data,
				unsigned long pc,
				unsigned char reg,
				unsigned long *gpr)
{
	struct guest_paging_structures pg_structs;
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct parse_context ctx = {};
	/* No prefixes are supported yet */
	u8 opcodes[] = {0x0f, 0x22}, modrm;
	bool ok = false;
	int n;

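	/*
	 * MOV to CRn is encoded as 0x0f 0x22 /r: the ModRM reg field selects
	 * the control register, the rm field names the source GPR (the
	 * register-direct form has no SIB byte or displacement).
	 */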
	ctx.remaining = ARRAY_SIZE(opcodes);
	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto out;
	ctx.cs_base = (vmcb->efer & EFER_LMA) ? 0 : vmcb->cs.base;

	if (!ctx_advance(&ctx, &pc, &pg_structs))
		goto out;

	for (n = 0; n < ARRAY_SIZE(opcodes); n++, ctx.inst++) {
		if (*(ctx.inst) != opcodes[n])
			goto out;
		if (!ctx_advance(&ctx, &pc, &pg_structs))
			goto out;
	}

	if (!ctx_advance(&ctx, &pc, &pg_structs))
		goto out;

	modrm = *(ctx.inst);

	if (((modrm & 0x38) >> 3) != reg)
		goto out;

	if (gpr)
		*gpr = (modrm & 0x7);

	ok = true;
out:
	return ok;
}

/*
 * XXX: The only visible reason to have this function (vmx.c consistency
 * aside) is to prevent cells from setting invalid CD+NW combinations that
 * result in no more than VMEXIT_INVALID. Maybe we can get along without it
 * altogether?
 */
static bool svm_handle_cr(struct registers *guest_regs,
			  struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	/* Workaround GCC 4.8 warning on uninitialized variable 'reg' */
	unsigned long reg = -1, val, bits;
	bool ok = true;

	if (has_assists) {
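		/*
		 * With decode assists, EXITINFO1 bit 63 is set for MOV CRx
		 * (and the low bits then hold the number of the source GPR),
		 * while LMSW and CLTS leave it clear.
		 */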
		if (!(vmcb->exitinfo1 & (1UL << 63))) {
			panic_printk("FATAL: Unsupported CR access (LMSW or CLTS)\n");
			ok = false;
			goto out;
		}
		reg = vmcb->exitinfo1 & 0x07;
	} else {
		if (!x86_parse_mov_to_cr(cpu_data, vmcb->rip, 0, &reg)) {
			panic_printk("FATAL: Unable to parse MOV-to-CR instruction\n");
			ok = false;
			goto out;
		}
	}

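	/*
	 * struct registers holds the GPRs pushed in reverse order
	 * (r15 down to rax), so GPR number n sits at index 15 - n.
	 * RSP is not part of that frame and is taken from the VMCB.
	 */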
	if (reg == 4)
		val = vmcb->rsp;
	else
		val = ((unsigned long *)guest_regs)[15 - reg];

	vcpu_skip_emulated_instruction(X86_INST_LEN_MOV_TO_CR);
	/* Flush TLB on PG/WP/CD/NW change: See APMv2, Sect. 15.16 */
	bits = (X86_CR0_PG | X86_CR0_WP | X86_CR0_CD | X86_CR0_NW);
	if ((val ^ vmcb->cr0) & bits)
		vcpu_tlb_flush();
	/* TODO: better check for #GP reasons */
	vmcb->cr0 = val & SVM_CR0_CLEARED_BITS;
	if (val & X86_CR0_PG)
		update_efer(cpu_data);

out:
	return ok;
}

static bool svm_handle_msr_read(struct registers *guest_regs,
		struct per_cpu *cpu_data)
{
	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
		x2apic_handle_read(guest_regs);
		return true;
	} else {
		panic_printk("FATAL: Unhandled MSR read: %x\n",
			     guest_regs->rcx);
		return false;
	}
}

static bool svm_handle_msr_write(struct registers *guest_regs,
		struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long efer;
	bool result = true;

	if (guest_regs->rcx >= MSR_X2APIC_BASE &&
	    guest_regs->rcx <= MSR_X2APIC_END) {
		result = x2apic_handle_write(guest_regs, cpu_data);
		goto out;
	}
	if (guest_regs->rcx == MSR_EFER) {
		/* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
		efer = (guest_regs->rax & 0xffffffff) |
			(guest_regs->rdx << 32) | EFER_SVME;
		/* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
		if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
			vcpu_tlb_flush();
		vmcb->efer = efer;
		goto out;
	}

	result = false;
	panic_printk("FATAL: Unhandled MSR write: %x\n",
		     guest_regs->rcx);
out:
	if (result)
		vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
	return result;
}

/*
 * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
 * be treated separately in svm_handle_avic_access().
 */
static bool svm_handle_apic_access(struct registers *guest_regs,
				   struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct guest_paging_structures pg_structs;
	unsigned int inst_len, offset;
	bool is_write;

	/* The caller is responsible for sanity checks */
	is_write = !!(vmcb->exitinfo1 & 0x2);
	offset = vmcb->exitinfo2 - XAPIC_BASE;

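	/* xAPIC registers are 16-byte aligned; reject unaligned accesses */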
	if (offset & 0x00f)
		goto out_err;

	if (!vcpu_get_guest_paging_structs(&pg_structs))
		goto out_err;

	inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
				    &pg_structs, offset >> 4, is_write);
	if (!inst_len)
		goto out_err;

	vcpu_skip_emulated_instruction(inst_len);
	return true;

out_err:
	panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
		     offset, is_write);
	return false;
}

static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
{
	panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
		     vmcb->rsp, vmcb->rflags);
	panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
		     guest_regs->rbx, guest_regs->rcx);
	panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
		     guest_regs->rsi, guest_regs->rdi);
	panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
		     vmcb->cs.selector,
		     vmcb->cs.base,
		     vmcb->cs.access_rights,
		     (vmcb->efer & EFER_LMA));
	panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
		     vmcb->cr3, vmcb->cr4);
	panic_printk("EFER: %p\n", vmcb->efer);
}

static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
					 struct vcpu_pf_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;

	out->phys_addr = vmcb->exitinfo2;
	out->is_write = !!(vmcb->exitinfo1 & 0x2);
}

static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
					 struct vcpu_io_intercept *out)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	u64 exitinfo = vmcb->exitinfo1;

	/* parse exit info for I/O instructions (see APM, 15.10.2) */
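	/*
	 * EXITINFO1 layout: bit 0 = direction (1 = IN), bit 2 = string
	 * instruction, bit 3 = REP prefix, bits 4-6 = operand size as a
	 * one-hot field (1/2/4 bytes), bits 16-31 = port number.
	 * EXITINFO2 holds the rIP of the instruction following the IN/OUT.
	 */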
	out->port = (exitinfo >> 16) & 0xFFFF;
	out->size = (exitinfo >> 4) & 0x7;
	out->in = !!(exitinfo & 0x1);
	out->inst_len = vmcb->exitinfo2 - vmcb->rip;
	out->rep_or_str = !!(exitinfo & 0x0c);
}

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
	struct vmcb *vmcb = &cpu_data->vmcb;
	struct vcpu_execution_state x_state;
	struct vcpu_pf_intercept pf;
	struct vcpu_io_intercept io;
	bool res = false;
	int sipi_vector;

	/* Restore GS value expected by per_cpu data accessors */
	write_msr(MSR_GS_BASE, (unsigned long)cpu_data);

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

	switch (vmcb->exitcode) {
	case VMEXIT_INVALID:
		panic_printk("FATAL: VM-Entry failure, error %d\n",
			     vmcb->exitcode);
		break;
	case VMEXIT_NMI:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
		/* Temporarily enable GIF to consume pending NMI */
		asm volatile("stgi; clgi" : : : "memory");
		sipi_vector = x86_handle_events(cpu_data);
		if (sipi_vector >= 0) {
			printk("CPU %d received SIPI, vector %x\n",
			       cpu_data->cpu_id, sipi_vector);
			vcpu_reset(cpu_data, sipi_vector);
			memset(guest_regs, 0, sizeof(*guest_regs));
		}
		iommu_check_pending_faults(cpu_data);
		return;
	case VMEXIT_CPUID:
		/* FIXME: We are not intercepting CPUID now */
		return;
	case VMEXIT_VMMCALL:
		vcpu_vendor_get_execution_state(&x_state);
		vcpu_handle_hypercall(guest_regs, &x_state);
		return;
	case VMEXIT_CR0_SEL_WRITE:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_CR]++;
		if (svm_handle_cr(guest_regs, cpu_data))
			return;
		break;
	case VMEXIT_MSR:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
		if (!vmcb->exitinfo1)
			res = svm_handle_msr_read(guest_regs, cpu_data);
		else
			res = svm_handle_msr_write(guest_regs, cpu_data);
		if (res)
			return;
		break;
	case VMEXIT_NPF:
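		/*
		 * EXITINFO1 carries a page-fault style error code:
		 * 0x7 = present, write, user - i.e. a write to the
		 * read-only xAPIC mapping installed in non-AVIC mode.
		 */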
		if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
		     vmcb->exitinfo2 >= XAPIC_BASE &&
		     vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
			/* APIC access in non-AVIC mode */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
			if (svm_handle_apic_access(guest_regs, cpu_data))
				return;
		} else {
			/* General MMIO (IOAPIC, PCI etc) */
			cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
			vcpu_vendor_get_pf_intercept(cpu_data, &pf);
			if (vcpu_handle_pt_violation(guest_regs, &pf))
				return;
		}

		panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
			     "error code is %x\n", vmcb->exitinfo2,
			     vmcb->exitinfo1 & 0xf);
		break;
	case VMEXIT_XSETBV:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XSETBV]++;
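		/*
		 * Forward the write only if it targets XCR0, keeps the x87
		 * bit set and enables no state component beyond what CPUID
		 * leaf 0Dh reports as supported.
		 */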
		if ((guest_regs->rax & X86_XCR0_FP) &&
		    (guest_regs->rax & ~cpuid_eax(0x0d)) == 0 &&
		    guest_regs->rcx == 0 && guest_regs->rdx == 0) {
			vcpu_skip_emulated_instruction(X86_INST_LEN_XSETBV);
			asm volatile(
				"xsetbv"
				: /* no output */
				: "a" (guest_regs->rax), "c" (0), "d" (0));
			return;
		}
		panic_printk("FATAL: Invalid xsetbv parameters: "
			     "xcr[%d] = %x:%x\n", guest_regs->rcx,
			     guest_regs->rdx, guest_regs->rax);
		break;
	case VMEXIT_IOIO:
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
		vcpu_vendor_get_io_intercept(cpu_data, &io);
		if (vcpu_handle_io_access(guest_regs, &io))
			return;
		break;
	/* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
	default:
		panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
			     "exitinfo1 %p exitinfo2 %p\n",
			     vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
	}
	dump_guest_regs(guest_regs, vmcb);
	panic_park();
}

void vcpu_park(struct per_cpu *cpu_data)
{
	/* TODO: Implement */
}

void vcpu_nmi_handler(struct per_cpu *cpu_data)
{
	printk("Consuming pending NMI on CPU %d\n", cpu_data->cpu_id);
}

void vcpu_tlb_flush(void)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;

	if (has_flush_by_asid)
		vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
	else
		vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
			      unsigned long pc, unsigned int *size)
{
	struct per_cpu *cpu_data = this_cpu_data();
	struct vmcb *vmcb = &cpu_data->vmcb;
	unsigned long start;

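	/*
	 * With decode assists the CPU stores the fetched bytes of the
	 * intercepted instruction in the VMCB (guest_bytes/bytes_fetched),
	 * so the request can be served from there instead of mapping
	 * guest memory.
	 */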
	if (has_assists) {
		if (!*size)
			return NULL;
		start = vmcb->rip - pc;
		if (start < vmcb->bytes_fetched) {
			*size = vmcb->bytes_fetched - start;
			return &vmcb->guest_bytes[start];
		} else {
			return NULL;
		}
	} else {
		return vcpu_map_inst(pg_structs, pc, size);
	}
}

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
				    struct vcpu_io_bitmap *iobm)
{
	iobm->data = cell->svm.iopm;
	iobm->size = sizeof(cell->svm.iopm);
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
	struct per_cpu *cpu_data = this_cpu_data();

	x_state->efer = cpu_data->vmcb.efer;
	x_state->rflags = cpu_data->vmcb.rflags;
	x_state->cs = cpu_data->vmcb.cs.selector;
	x_state->rip = cpu_data->vmcb.rip;
}