rtime.felk.cvut.cz Git - jailhouse.git/blob - hypervisor/arch/x86/svm.c
x86: Implement AMD-V NMI handler and CPU reset
1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013
5  * Copyright (c) Valentine Sinitsyn, 2014
6  *
7  * Authors:
8  *  Jan Kiszka <jan.kiszka@siemens.com>
9  *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
10  *
11  * Based on vmx.c written by Jan Kiszka.
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  */
16
17 #include <jailhouse/entry.h>
18 #include <jailhouse/cell-config.h>
19 #include <jailhouse/control.h>
20 #include <jailhouse/paging.h>
21 #include <jailhouse/printk.h>
22 #include <jailhouse/processor.h>
23 #include <jailhouse/string.h>
24 #include <asm/apic.h>
25 #include <asm/cell.h>
26 #include <asm/control.h>
27 #include <asm/iommu.h>
28 #include <asm/paging.h>
29 #include <asm/percpu.h>
30 #include <asm/processor.h>
31 #include <asm/svm.h>
32 #include <asm/vcpu.h>
33
34 /*
35  * The NW bit is ignored by all modern processors, but the combination
36  * CD=0, NW=1 in CR0 is rejected by SVM (see APMv2, Sect. 15.5).
37  * To handle this, we always keep the NW bit off.
38  */
39 #define SVM_CR0_CLEARED_BITS    ~X86_CR0_NW
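/*
 * A minimal illustration of how this mask is meant to be used whenever a
 * CR0 value is loaded into the VMCB (vmcb_setup() below does exactly this):
 *
 *     vmcb->cr0 = cr0_value & SVM_CR0_CLEARED_BITS;   /* NW forced to 0 */
 *
 * With NW permanently clear, no guest-supplied CR0 value can hit the
 * rejected CD=0, NW=1 combination.
 */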
40
41 static bool has_avic, has_assists, has_flush_by_asid;
42
43 static const struct segment invalid_seg;
44
45 static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];
46
47 static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
48         [ SVM_MSRPM_0000 ] = {
49                 [      0/4 ...  0x017/4 ] = 0,
50                 [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
51                 [  0x01c/4 ...  0x7ff/4 ] = 0,
52                 /* x2APIC MSRs - emulated if not present */
53                 [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
54                 [  0x804/4 ...  0x807/4 ] = 0,
55                 [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
56                 [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
57                 [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
58                 [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
59                 [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
60                 [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
61                 [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
62                 [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
63                 [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
64                 [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
65                 [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
66                 [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
67                 [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
68                 [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
69                 [  0x840/4 ... 0x1fff/4 ] = 0,
70         },
71         [ SVM_MSRPM_C000 ] = {
72                 [      0/4 ...  0x07f/4 ] = 0,
73                 [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
74                 [  0x084/4 ... 0x1fff/4 ] = 0
75         },
76         [ SVM_MSRPM_C001 ] = {
77                 [      0/4 ... 0x1fff/4 ] = 0,
78         },
79         [ SVM_MSRPM_RESV ] = {
80                 [      0/4 ... 0x1fff/4 ] = 0,
81         }
82 };
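/*
 * A short worked decode of the bytes above: each MSR takes two bits in the
 * permission map (the low bit intercepts reads, the high bit intercepts
 * writes), so one byte covers four consecutive MSRs:
 *
 *     byte_index = msr_offset / 4;        /* 4 MSRs per byte */
 *     shift      = (msr_offset % 4) * 2;  /* 2 bits per MSR  */
 *     read_intercepted  = byte & (1 << shift);
 *     write_intercepted = byte & (2 << shift);
 *
 * For example, 0x93 (binary 1001 0011) in the 0x808-0x80b group means that
 * 0x808 reads and writes, 0x80a reads and 0x80b writes are intercepted.
 */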
83
84 static void *avic_page;
85
86 static int svm_check_features(void)
87 {
88         /* SVM is available */
89         if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
90                 return -ENODEV;
91
92         /* Nested paging */
93         if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
94                 return -EIO;
95
96         /* Decode assists */
97         if ((cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS))
98                 has_assists = true;
99
100         /* AVIC support */
101         if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
102                 has_avic = true;
103
104         /* TLB Flush by ASID support */
105         if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
106                 has_flush_by_asid = true;
107
108         return 0;
109 }
110
111 static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
112                                      const struct desc_table_reg *dtr)
113 {
114         struct svm_segment tmp = { 0 };
115
116         if (dtr) {
117                 tmp.base = dtr->base;
118                 tmp.limit = dtr->limit & 0xffff;
119         }
120
121         *svm_segment = tmp;
122 }
123
124 /* TODO: struct segment should be x86-generic, not the VMX-specific one used here */
125 static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
126                                          const struct segment *segment)
127 {
128         u32 ar;
129
130         svm_segment->selector = segment->selector;
131
132         if (segment->access_rights == 0x10000) {
133                 svm_segment->access_rights = 0;
134         } else {
135                 ar = segment->access_rights;
136                 svm_segment->access_rights =
137                         ((ar & 0xf000) >> 4) | (ar & 0x00ff);
138         }
139
140         svm_segment->limit = segment->limit;
141         svm_segment->base = segment->base;
142 }
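/*
 * The conversion above packs the VMX-style access-rights layout (attribute
 * bits 0-7 and 12-15, with bits 8-11 unused) into the contiguous attribute
 * field SVM expects. A worked example for a typical 64-bit code segment:
 *
 *     vmx_ar = 0xa09b;                            /* type 0xb, S, P, L, G */
 *     svm_ar = ((vmx_ar & 0xf000) >> 4) | (vmx_ar & 0x00ff);   /* 0x0a9b */
 *
 * The 0x10000 value checked first is VMX's "segment unusable" marker, which
 * has no SVM equivalent and is therefore mapped to all-zero attributes.
 */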
143
144 static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
145 {
146         /* No real need for this function; used for consistency with vmx.c */
147         vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
148         vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);
149
150         return true;
151 }
152
153 static int vmcb_setup(struct per_cpu *cpu_data)
154 {
155         struct vmcb *vmcb = &cpu_data->vmcb;
156
157         memset(vmcb, 0, sizeof(struct vmcb));
158
159         vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
160         vmcb->cr3 = cpu_data->linux_cr3;
161         vmcb->cr4 = read_cr4();
162
163         set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
164         set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
165         set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
166         set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
167         set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
168         set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
169         set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);
170
171         set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
172         set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
173         set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);
174
175         vmcb->cpl = 0; /* Linux runs in ring 0 before migration */
176
177         vmcb->rflags = 0x02;
178         vmcb->rsp = cpu_data->linux_sp +
179                 (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
180         vmcb->rip = cpu_data->linux_ip;
181
182         vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
183         vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
184         vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
185         vmcb->star = read_msr(MSR_STAR);
186         vmcb->lstar = read_msr(MSR_LSTAR);
187         vmcb->cstar = read_msr(MSR_CSTAR);
188         vmcb->sfmask = read_msr(MSR_SFMASK);
189         vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);
190
191         vmcb->dr6 = 0x00000ff0;
192         vmcb->dr7 = 0x00000400;
193
194         /* Make the hypervisor visible */
195         vmcb->efer = (cpu_data->linux_efer | EFER_SVME);
196
197         /* Linux uses custom PAT setting */
198         vmcb->g_pat = read_msr(MSR_IA32_PAT);
199
200         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
201         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
202         /* TODO: Do we need this for SVM? */
203         /* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
204         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
205         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
206         vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;
207
208         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
209         vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;
210
211         vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);
212
213         vmcb->np_enable = 1;
214         /* Only one guest ever owns this CPU, so a single ASID suffices */
215         vmcb->guest_asid = 1;
216
217         /* TODO: Setup AVIC */
218
219         return vcpu_set_cell_config(cpu_data->cell, vmcb);
220 }
221
222 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
223                                      unsigned long gphys,
224                                      unsigned long flags)
225 {
226         return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
227                         gphys, flags);
228 }
229
230 static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
231 {
232         /* See APMv2, Section 15.25.5 */
233         *pte = (next_pt & 0x000ffffffffff000UL) |
234                 (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
235 }
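/*
 * Nested table walks treat every guest access as a user-mode access, so the
 * intermediate NPT entries created here must carry the U/S bit on top of the
 * default present/writable flags; bits 12-51 hold the next table's address.
 */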
236
237 int vcpu_vendor_init(void)
238 {
239         unsigned long vm_cr;
240         int err, n;
241
242         err = svm_check_features();
243         if (err)
244                 return err;
245
246         vm_cr = read_msr(MSR_VM_CR);
247         if (vm_cr & VM_CR_SVMDIS)
248                 /* SVM disabled in BIOS */
249                 return -EPERM;
250
251         /* Nested page tables use the same format as native x86-64 paging */
252         memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
253         for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
254                 npt_paging[n].set_next_pt = npt_set_next_pt;
255
256         /* using_x2apic is always false on current AMD CPUs (except when
257            running under nested SVM); see APMv2, Sect. 16.3.1 */
258         if (using_x2apic) {
259                 /* allow direct x2APIC access except for ICR writes */
260                 memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
261                                 (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
262                 msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
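                /*
                 * 0x830 is the first MSR of its four-MSR group, so 0x02 sets
                 * only its write-intercept bit; ICR reads and all accesses to
                 * 0x831-0x833 remain unintercepted.
                 */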
263         } else {
264                 /* Enable Extended Interrupt LVT */
265                 apic_reserved_bits[0x50] = 0;
266                 if (has_avic) {
267                         avic_page = page_alloc(&remap_pool, 1);
268                         if (!avic_page)
269                                 return -ENOMEM;
270                 }
271         }
272
273         return vcpu_cell_init(&root_cell);
274 }
275
276 int vcpu_vendor_cell_init(struct cell *cell)
277 {
278         u64 flags;
279         int err;
280
281         /* allocate iopm (two 4-K pages + 3 bits) */
282         cell->svm.iopm = page_alloc(&mem_pool, 3);
283         if (!cell->svm.iopm)
284                 return -ENOMEM;
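        /*
         * Sizing, roughly: 65536 ports x 1 bit = 8 KiB, plus the three
         * trailing bits the CPU may consult for a multi-byte access at the
         * last port, hence the three-page allocation above.
         */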
285
286         /* build root NPT of cell */
287         cell->svm.npt_structs.root_paging = npt_paging;
288         cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
289         if (!cell->svm.npt_structs.root_table)
290                 return -ENOMEM;
291
292         if (!has_avic) {
293                 /*
294                  * Map xAPIC as is; reads are passed, writes are trapped.
295                  */
296                 flags = PAGE_READONLY_FLAGS |
297                         PAGE_FLAG_US |
298                         PAGE_FLAG_UNCACHED;
299                 err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
300                                     PAGE_SIZE, XAPIC_BASE,
301                                     flags,
302                                     PAGING_NON_COHERENT);
303         } else {
304                 flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
305                 err = paging_create(&cell->svm.npt_structs,
306                                     paging_hvirt2phys(avic_page),
307                                     PAGE_SIZE, XAPIC_BASE,
308                                     flags,
309                                     PAGING_NON_COHERENT);
310         }
311
312         return err;
313 }
314
315 int vcpu_map_memory_region(struct cell *cell,
316                            const struct jailhouse_memory *mem)
317 {
318         u64 phys_start = mem->phys_start;
319         u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */
320
321         if (mem->flags & JAILHOUSE_MEM_READ)
322                 flags |= PAGE_FLAG_PRESENT;
323         if (mem->flags & JAILHOUSE_MEM_WRITE)
324                 flags |= PAGE_FLAG_RW;
325         if (mem->flags & JAILHOUSE_MEM_EXECUTE)
326                 flags |= PAGE_FLAG_EXECUTE;
327         if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
328                 phys_start = paging_hvirt2phys(&cell->comm_page);
329
330         return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
331                              mem->virt_start, flags, PAGING_NON_COHERENT);
332 }
333
334 int vcpu_unmap_memory_region(struct cell *cell,
335                              const struct jailhouse_memory *mem)
336 {
337         return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
338                               mem->size, PAGING_NON_COHERENT);
339 }
340
341 void vcpu_vendor_cell_exit(struct cell *cell)
342 {
343         paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
344                        PAGING_NON_COHERENT);
345         page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
346 }
347
348 int vcpu_init(struct per_cpu *cpu_data)
349 {
350         unsigned long efer;
351         int err;
352
353         err = svm_check_features();
354         if (err)
355                 return err;
356
357         efer = read_msr(MSR_EFER);
358         if (efer & EFER_SVME)
359                 return -EBUSY;
360
361         efer |= EFER_SVME;
362         write_msr(MSR_EFER, efer);
363
364         cpu_data->svm_state = SVMON;
365
366         if (!vmcb_setup(cpu_data))
367                 return -EIO;
368
369         write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));
370
371         /* Enable the Extended Interrupt LVT, an AMD-specific xAPIC register */
372         if (!using_x2apic)
373                 apic_reserved_bits[0x50] = 0;
374
375         return 0;
376 }
377
378 void vcpu_exit(struct per_cpu *cpu_data)
379 {
380         unsigned long efer;
381
382         if (cpu_data->svm_state == SVMOFF)
383                 return;
384
385         cpu_data->svm_state = SVMOFF;
386
387         efer = read_msr(MSR_EFER);
388         efer &= ~EFER_SVME;
389         write_msr(MSR_EFER, efer);
390
391         write_msr(MSR_VM_HSAVE_PA, 0);
392 }
393
394 void vcpu_activate_vmm(struct per_cpu *cpu_data)
395 {
396         /* TODO: Implement */
397         __builtin_unreachable();
398 }
399
400 void __attribute__((noreturn))
401 vcpu_deactivate_vmm(struct registers *guest_regs)
402 {
403         /* TODO: Implement */
404         __builtin_unreachable();
405 }
406
407 static void vcpu_reset(struct per_cpu *cpu_data, unsigned int sipi_vector)
408 {
409         struct vmcb *vmcb = &cpu_data->vmcb;
410         unsigned long val;
411         bool ok = true;
412
413         vmcb->cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
414         vmcb->cr3 = 0;
415         vmcb->cr4 = 0;
416
417         vmcb->rflags = 0x02;
418
419         val = 0;
420         if (sipi_vector == APIC_BSP_PSEUDO_SIPI) {
421                 val = 0xfff0;
422                 sipi_vector = 0xf0;
423         }
424         vmcb->rip = val;
425         vmcb->rsp = 0;
426
427         vmcb->cs.selector = sipi_vector << 8;
428         vmcb->cs.base = sipi_vector << 12;
429         vmcb->cs.limit = 0xffff;
430         vmcb->cs.access_rights = 0x009b;
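        /*
         * With this encoding, a SIPI with vector V starts the CPU in real
         * mode at physical address V << 12: e.g. vector 0x9a gives
         * CS = 0x9a00, base = 0x9a000, RIP = 0. The BSP pseudo-SIPI case
         * above results in CS = 0xf000, base = 0xf0000, RIP = 0xfff0,
         * i.e. the classic 0xffff0 reset vector.
         */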
431
432         vmcb->ds.selector = 0;
433         vmcb->ds.base = 0;
434         vmcb->ds.limit = 0xffff;
435         vmcb->ds.access_rights = 0x0093;
436
437         vmcb->es.selector = 0;
438         vmcb->es.base = 0;
439         vmcb->es.limit = 0xffff;
440         vmcb->es.access_rights = 0x0093;
441
442         vmcb->fs.selector = 0;
443         vmcb->fs.base = 0;
444         vmcb->fs.limit = 0xffff;
445         vmcb->fs.access_rights = 0x0093;
446
447         vmcb->gs.selector = 0;
448         vmcb->gs.base = 0;
449         vmcb->gs.limit = 0xffff;
450         vmcb->gs.access_rights = 0x0093;
451
452         vmcb->ss.selector = 0;
453         vmcb->ss.base = 0;
454         vmcb->ss.limit = 0xffff;
455         vmcb->ss.access_rights = 0x0093;
456
457         vmcb->tr.selector = 0;
458         vmcb->tr.base = 0;
459         vmcb->tr.limit = 0xffff;
460         vmcb->tr.access_rights = 0x008b;
461
462         vmcb->ldtr.selector = 0;
463         vmcb->ldtr.base = 0;
464         vmcb->ldtr.limit = 0xffff;
465         vmcb->ldtr.access_rights = 0x0082;
466
467         vmcb->gdtr.selector = 0;
468         vmcb->gdtr.base = 0;
469         vmcb->gdtr.limit = 0xffff;
470         vmcb->gdtr.access_rights = 0;
471
472         vmcb->idtr.selector = 0;
473         vmcb->idtr.base = 0;
474         vmcb->idtr.limit = 0xffff;
475         vmcb->idtr.access_rights = 0;
476
477         vmcb->efer = EFER_SVME;
478
479         /* These MSRs are undefined on reset */
480         vmcb->star = 0;
481         vmcb->lstar = 0;
482         vmcb->cstar = 0;
483         vmcb->sfmask = 0;
484         vmcb->sysenter_cs = 0;
485         vmcb->sysenter_eip = 0;
486         vmcb->sysenter_esp = 0;
487         vmcb->kerngsbase = 0;
488
489         vmcb->g_pat = 0x0007040600070406;
490
491         vmcb->dr7 = 0x00000400;
492
493         ok &= vcpu_set_cell_config(cpu_data->cell, vmcb);
494
495         /* ok is always true here; the check is kept for consistency with vmx.c */
496         if (!ok) {
497                 panic_printk("FATAL: CPU reset failed\n");
498                 panic_stop();
499         }
500 }
501
502 void vcpu_skip_emulated_instruction(unsigned int inst_len)
503 {
504         struct per_cpu *cpu_data = this_cpu_data();
505         struct vmcb *vmcb = &cpu_data->vmcb;
506         vmcb->rip += inst_len;
507 }
508
509 bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
510 {
511         struct per_cpu *cpu_data = this_cpu_data();
512         struct vmcb *vmcb = &cpu_data->vmcb;
513
514         if (vmcb->efer & EFER_LMA) {
515                 pg_structs->root_paging = x86_64_paging;
516                 pg_structs->root_table_gphys =
517                         vmcb->cr3 & 0x000ffffffffff000UL;
518         } else if ((vmcb->cr0 & X86_CR0_PG) &&
519                    !(vmcb->cr4 & X86_CR4_PAE)) {
520                 pg_structs->root_paging = i386_paging;
521                 pg_structs->root_table_gphys =
522                         vmcb->cr3 & 0xfffff000UL;
523         } else if (!(vmcb->cr0 & X86_CR0_PG)) {
524                 /*
525                  * The guest can also be in non-paged protected mode, but
526                  * the translation mechanism will stay the same anyway.
527                  */
528                 pg_structs->root_paging = realmode_paging;
529                 /*
530                  * This will make paging_get_guest_pages map the page
531                  * that also contains the bootstrap code and, thus, is
532                  * always present in a cell.
533                  */
534                 pg_structs->root_table_gphys = 0xff000;
535         } else {
536                 printk("FATAL: Unsupported paging mode\n");
537                 return false;
538         }
539         return true;
540 }
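/*
 * In short, the guest paging mode is derived from the VMCB state as follows:
 *
 *     EFER.LMA = 1                 -> 4-level x86_64 paging, CR3 bits 12-51
 *     CR0.PG = 1, CR4.PAE = 0      -> 2-level i386 paging, CR3 bits 12-31
 *     CR0.PG = 0                   -> real-mode/identity handling
 *     anything else (32-bit PAE)   -> unsupported, reported as fatal
 */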
541
542 static bool svm_handle_msr_read(struct registers *guest_regs,
543                 struct per_cpu *cpu_data)
544 {
545         if (guest_regs->rcx >= MSR_X2APIC_BASE &&
546             guest_regs->rcx <= MSR_X2APIC_END) {
547                 vcpu_skip_emulated_instruction(X86_INST_LEN_RDMSR);
548                 x2apic_handle_read(guest_regs);
549                 return true;
550         } else {
551                 panic_printk("FATAL: Unhandled MSR read: %x\n",
552                              guest_regs->rcx);
553                 return false;
554         }
555 }
556
557 static bool svm_handle_msr_write(struct registers *guest_regs,
558                 struct per_cpu *cpu_data)
559 {
560         struct vmcb *vmcb = &cpu_data->vmcb;
561         unsigned long efer;
562         bool result = true;
563
564         if (guest_regs->rcx >= MSR_X2APIC_BASE &&
565             guest_regs->rcx <= MSR_X2APIC_END) {
566                 result = x2apic_handle_write(guest_regs, cpu_data);
567                 goto out;
568         }
569         if (guest_regs->rcx == MSR_EFER) {
570                 /* Never let a guest disable SVME; see APMv2, Sect. 3.1.7 */
571                 efer = (guest_regs->rax & 0xffffffff) |
572                         (guest_regs->rdx << 32) | EFER_SVME;
573                 /* Flush TLB on LME/NXE change: See APMv2, Sect. 15.16 */
574                 if ((efer ^ vmcb->efer) & (EFER_LME | EFER_NXE))
575                         vcpu_tlb_flush();
576                 vmcb->efer = efer;
577                 goto out;
578         }
579
580         result = false;
581         panic_printk("FATAL: Unhandled MSR write: %x\n",
582                      guest_regs->rcx);
583 out:
584         if (result)
585                 vcpu_skip_emulated_instruction(X86_INST_LEN_WRMSR);
586         return result;
587 }
588
589 /*
590  * TODO: This handles unaccelerated (non-AVIC) access. AVIC should
591  * be treated separately in svm_handle_avic_access().
592  */
593 static bool svm_handle_apic_access(struct registers *guest_regs,
594                                    struct per_cpu *cpu_data)
595 {
596         struct vmcb *vmcb = &cpu_data->vmcb;
597         struct guest_paging_structures pg_structs;
598         unsigned int inst_len, offset;
599         bool is_write;
600
601         /* The caller is responsible for sanity checks */
602         is_write = !!(vmcb->exitinfo1 & 0x2);
603         offset = vmcb->exitinfo2 - XAPIC_BASE;
604
605         if (offset & 0x00f)
606                 goto out_err;
607
608         if (!vcpu_get_guest_paging_structs(&pg_structs))
609                 goto out_err;
610
611         inst_len = apic_mmio_access(guest_regs, cpu_data, vmcb->rip,
612                                     &pg_structs, offset >> 4, is_write);
613         if (!inst_len)
614                 goto out_err;
615
616         vcpu_skip_emulated_instruction(inst_len);
617         return true;
618
619 out_err:
620         panic_printk("FATAL: Unhandled APIC access, offset %d, is_write: %d\n",
621                      offset, is_write);
622         return false;
623 }
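/*
 * Example: a guest write to the ICR low word at XAPIC_BASE + 0x300 arrives
 * here with exitinfo2 = XAPIC_BASE + 0x300 and the write bit set in
 * exitinfo1, so offset becomes 0x300 and apic_mmio_access() is asked to
 * emulate register 0x30. Accesses that are not 16-byte aligned are rejected
 * by the offset check above.
 */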
624
625 static void dump_guest_regs(struct registers *guest_regs, struct vmcb *vmcb)
626 {
627         panic_printk("RIP: %p RSP: %p FLAGS: %x\n", vmcb->rip,
628                      vmcb->rsp, vmcb->rflags);
629         panic_printk("RAX: %p RBX: %p RCX: %p\n", guest_regs->rax,
630                      guest_regs->rbx, guest_regs->rcx);
631         panic_printk("RDX: %p RSI: %p RDI: %p\n", guest_regs->rdx,
632                      guest_regs->rsi, guest_regs->rdi);
633         panic_printk("CS: %x BASE: %p AR-BYTES: %x EFER.LMA %d\n",
634                      vmcb->cs.selector,
635                      vmcb->cs.base,
636                      vmcb->cs.access_rights,
637                      (vmcb->efer & EFER_LMA));
638         panic_printk("CR0: %p CR3: %p CR4: %p\n", vmcb->cr0,
639                      vmcb->cr3, vmcb->cr4);
640         panic_printk("EFER: %p\n", vmcb->efer);
641 }
642
643 static void vcpu_vendor_get_pf_intercept(struct per_cpu *cpu_data,
644                                          struct vcpu_pf_intercept *out)
645 {
646         struct vmcb *vmcb = &cpu_data->vmcb;
647
648         out->phys_addr = vmcb->exitinfo2;
649         out->is_write = !!(vmcb->exitinfo1 & 0x2);
650 }
651
652 static void vcpu_vendor_get_io_intercept(struct per_cpu *cpu_data,
653                                          struct vcpu_io_intercept *out)
654 {
655         struct vmcb *vmcb = &cpu_data->vmcb;
656         u64 exitinfo = vmcb->exitinfo1;
657
658         /* Parse exit info for I/O instructions (see APMv2, Sect. 15.10.2) */
659         out->port = (exitinfo >> 16) & 0xFFFF;
660         out->size = (exitinfo >> 4) & 0x7;
661         out->in = !!(exitinfo & 0x1);
662         out->inst_len = vmcb->exitinfo2 - vmcb->rip;
663         out->rep_or_str = !!(exitinfo & 0x0c);
664 }
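/*
 * A decoded example, assuming a single-byte "in al, dx" from port 0x64 and
 * ignoring the address-size bits this code does not use: EXITINFO1 carries
 * PORT = 0x64 in bits 16-31, SZ8 in bit 4 and TYPE = IN in bit 0, so the
 * fields above come out as port = 0x64, size = 1, in = true,
 * rep_or_str = false. EXITINFO2 holds the rIP of the following instruction,
 * which is why inst_len is simply the difference from the current rIP.
 */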
665
666 void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
667 {
668         struct vmcb *vmcb = &cpu_data->vmcb;
669         struct vcpu_execution_state x_state;
670         struct vcpu_pf_intercept pf;
671         struct vcpu_io_intercept io;
672         bool res = false;
673         int sipi_vector;
674
675         /* Restore GS value expected by per_cpu data accessors */
676         write_msr(MSR_GS_BASE, (unsigned long)cpu_data);
677
678         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;
679
680         switch (vmcb->exitcode) {
681         case VMEXIT_INVALID:
682                 panic_printk("FATAL: VM-Entry failure, error %d\n",
683                              vmcb->exitcode);
684                 break;
685         case VMEXIT_NMI:
686                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;
687                 /* Temporarily enable GIF to consume pending NMI */
688                 asm volatile("stgi; clgi" : : : "memory");
689                 sipi_vector = x86_handle_events(cpu_data);
690                 if (sipi_vector >= 0) {
691                         printk("CPU %d received SIPI, vector %x\n",
692                                cpu_data->cpu_id, sipi_vector);
693                         vcpu_reset(cpu_data, sipi_vector);
694                         memset(guest_regs, 0, sizeof(*guest_regs));
695                 }
696                 iommu_check_pending_faults(cpu_data);
697                 return;
698         case VMEXIT_CPUID:
699                 /* FIXME: We are not intercepting CPUID now */
700                 return;
701         case VMEXIT_VMMCALL:
702                 vcpu_vendor_get_execution_state(&x_state);
703                 vcpu_handle_hypercall(guest_regs, &x_state);
704                 return;
705         case VMEXIT_MSR:
706                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MSR]++;
707                 if (!vmcb->exitinfo1)
708                         res = svm_handle_msr_read(guest_regs, cpu_data);
709                 else
710                         res = svm_handle_msr_write(guest_regs, cpu_data);
711                 if (res)
712                         return;
713                 break;
714         case VMEXIT_NPF:
715                 if ((vmcb->exitinfo1 & 0x7) == 0x7 &&
716                      vmcb->exitinfo2 >= XAPIC_BASE &&
717                      vmcb->exitinfo2 < XAPIC_BASE + PAGE_SIZE) {
718                         /* APIC access in non-AVIC mode */
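                        /*
                         * exitinfo1 is a #PF-style error code here; 0x7 means
                         * present + write + user, i.e. a guest write hitting
                         * the read-only xAPIC mapping set up in
                         * vcpu_vendor_cell_init().
                         */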
719                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_XAPIC]++;
720                         if (svm_handle_apic_access(guest_regs, cpu_data))
721                                 return;
722                 } else {
723                         /* General MMIO (IOAPIC, PCI etc) */
724                         cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MMIO]++;
725                         vcpu_vendor_get_pf_intercept(cpu_data, &pf);
726                         if (vcpu_handle_pt_violation(guest_regs, &pf))
727                                 return;
728                 }
729
730                 panic_printk("FATAL: Unhandled Nested Page Fault for (%p), "
731                              "error code is %x\n", vmcb->exitinfo2,
732                              vmcb->exitinfo1 & 0xf);
733                 break;
734         case VMEXIT_IOIO:
735                 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_PIO]++;
736                 vcpu_vendor_get_io_intercept(cpu_data, &io);
737                 if (vcpu_handle_io_access(guest_regs, &io))
738                         return;
739                 break;
740         /* TODO: Handle VMEXIT_AVIC_NOACCEL and VMEXIT_AVIC_INCOMPLETE_IPI */
741         default:
742                 panic_printk("FATAL: Unexpected #VMEXIT, exitcode %x, "
743                              "exitinfo1 %p exitinfo2 %p\n",
744                              vmcb->exitcode, vmcb->exitinfo1, vmcb->exitinfo2);
745         }
746         dump_guest_regs(guest_regs, vmcb);
747         panic_park();
748 }
749
750 void vcpu_park(struct per_cpu *cpu_data)
751 {
752         /* TODO: Implement */
753 }
754
755 void vcpu_nmi_handler(struct per_cpu *cpu_data)
756 {
757         printk("Consuming pending NMI on CPU %d\n", cpu_data->cpu_id);
758 }
759
760 void vcpu_tlb_flush(void)
761 {
762         struct per_cpu *cpu_data = this_cpu_data();
763         struct vmcb *vmcb = &cpu_data->vmcb;
764
765         if (has_flush_by_asid)
766                 vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
767         else
768                 vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
769 }
770
771 const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
772                               unsigned long pc, unsigned int *size)
773 {
774         struct per_cpu *cpu_data = this_cpu_data();
775         struct vmcb *vmcb = &cpu_data->vmcb;
776         unsigned long start;
777
778         if (has_assists) {
779                 if (!*size)
780                         return NULL;
781                 start = vmcb->rip - pc;
782                 if (start < vmcb->bytes_fetched) {
783                         *size = vmcb->bytes_fetched - start;
784                         return &vmcb->guest_bytes[start];
785                 } else {
786                         return NULL;
787                 }
788         } else {
789                 return vcpu_map_inst(pg_structs, pc, size);
790         }
791 }
792
793 void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
794                                     struct vcpu_io_bitmap *iobm)
795 {
796         iobm->data = cell->svm.iopm;
797         iobm->size = sizeof(cell->svm.iopm);
798 }
799
800 void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
801 {
802         struct per_cpu *cpu_data = this_cpu_data();
803
804         x_state->efer = cpu_data->vmcb.efer;
805         x_state->rflags = cpu_data->vmcb.rflags;
806         x_state->cs = cpu_data->vmcb.cs.selector;
807         x_state->rip = cpu_data->vmcb.rip;
808 }