x86: Add AMD-V cell initialization/exit code
jailhouse.git/hypervisor/arch/x86/svm.c

/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * Based on vmx.c written by Jan Kiszka.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <asm/apic.h>
#include <asm/cell.h>
#include <asm/paging.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/svm.h>
#include <asm/vcpu.h>

/*
 * The NW bit is ignored by all modern processors; however, some
 * combinations of the NW and CD bits are prohibited by SVM (see APMv2,
 * Sect. 15.5). To handle this, we always keep the NW bit off.
 */
#define SVM_CR0_CLEARED_BITS    ~X86_CR0_NW

static bool has_avic, has_assists, has_flush_by_asid;

static const struct segment invalid_seg;

static struct paging npt_paging[NPT_PAGE_DIR_LEVELS];

static u8 __attribute__((aligned(PAGE_SIZE))) msrpm[][0x2000/4] = {
        [ SVM_MSRPM_0000 ] = {
                [      0/4 ...  0x017/4 ] = 0,
                [  0x018/4 ...  0x01b/4 ] = 0x80, /* 0x01b (w) */
                [  0x01c/4 ...  0x7ff/4 ] = 0,
                /* x2APIC MSRs - emulated if not present */
                [  0x800/4 ...  0x803/4 ] = 0x90, /* 0x802 (r), 0x803 (r) */
                [  0x804/4 ...  0x807/4 ] = 0,
                [  0x808/4 ...  0x80b/4 ] = 0x93, /* 0x808 (rw), 0x80a (r), 0x80b (w) */
                [  0x80c/4 ...  0x80f/4 ] = 0xc8, /* 0x80d (w), 0x80f (rw) */
                [  0x810/4 ...  0x813/4 ] = 0x55, /* 0x810 - 0x813 (r) */
                [  0x814/4 ...  0x817/4 ] = 0x55, /* 0x814 - 0x817 (r) */
                [  0x818/4 ...  0x81b/4 ] = 0x55, /* 0x818 - 0x81b (r) */
                [  0x81c/4 ...  0x81f/4 ] = 0x55, /* 0x81c - 0x81f (r) */
                [  0x820/4 ...  0x823/4 ] = 0x55, /* 0x820 - 0x823 (r) */
                [  0x824/4 ...  0x827/4 ] = 0x55, /* 0x824 - 0x827 (r) */
                [  0x828/4 ...  0x82b/4 ] = 0x03, /* 0x828 (rw) */
                [  0x82c/4 ...  0x82f/4 ] = 0xc0, /* 0x82f (rw) */
                [  0x830/4 ...  0x833/4 ] = 0xf3, /* 0x830 (rw), 0x832 (rw), 0x833 (rw) */
                [  0x834/4 ...  0x837/4 ] = 0xff, /* 0x834 - 0x837 (rw) */
                [  0x838/4 ...  0x83b/4 ] = 0x07, /* 0x838 (rw), 0x839 (r) */
                [  0x83c/4 ...  0x83f/4 ] = 0x70, /* 0x83e (rw), 0x83f (r) */
                [  0x840/4 ... 0x1fff/4 ] = 0,
        },
        [ SVM_MSRPM_C000 ] = {
                [      0/4 ...  0x07f/4 ] = 0,
                [  0x080/4 ...  0x083/4 ] = 0x02, /* 0x080 (w) */
                [  0x084/4 ... 0x1fff/4 ] = 0
        },
        [ SVM_MSRPM_C001 ] = {
                [      0/4 ... 0x1fff/4 ] = 0,
        },
        [ SVM_MSRPM_RESV ] = {
                [      0/4 ... 0x1fff/4 ] = 0,
        }
};

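/*
 * MSRPM encoding note (see the MSR-intercept description in APMv2): every
 * MSR is controlled by two consecutive bits, the lower one intercepting
 * reads and the higher one intercepting writes, so each byte covers four
 * MSRs. For example, setting the byte for MSRs 0x018-0x01b to 0x80 (bit 7)
 * intercepts only writes to MSR 0x01b (IA32_APIC_BASE), matching the
 * annotations above.
 */
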
static void *avic_page;

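/*
 * Probe the SVM feature set via CPUID Fn8000_000A. Nested paging is
 * mandatory for Jailhouse; decode assists, AVIC and flush-by-ASID are
 * optional accelerators and are only recorded here.
 */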
static int svm_check_features(void)
{
        /* SVM is available */
        if (!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM))
                return -ENODEV;

        /* Nested paging */
        if (!(cpuid_edx(0x8000000A) & X86_FEATURE_NP))
                return -EIO;

        /* Decode assists */
        if (cpuid_edx(0x8000000A) & X86_FEATURE_DECODE_ASSISTS)
                has_assists = true;

        /* AVIC support */
        if (cpuid_edx(0x8000000A) & X86_FEATURE_AVIC)
                has_avic = true;

        /* TLB Flush by ASID support */
        if (cpuid_edx(0x8000000A) & X86_FEATURE_FLUSH_BY_ASID)
                has_flush_by_asid = true;

        return 0;
}

static void set_svm_segment_from_dtr(struct svm_segment *svm_segment,
                                     const struct desc_table_reg *dtr)
{
        struct svm_segment tmp = { 0 };

        if (dtr) {
                tmp.base = dtr->base;
                tmp.limit = dtr->limit & 0xffff;
        }

        *svm_segment = tmp;
}

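/*
 * Conversion note: struct segment carries VMX-style access rights, where
 * the attribute bits are split across bits 0-7 and 12-15 and the value
 * 0x10000 flags an unusable segment. The VMCB packs the same attributes
 * into 12 contiguous bits, hence the ((ar & 0xf000) >> 4) | (ar & 0x00ff)
 * shuffle below; e.g. a 64-bit code segment with VMX rights 0xa09b becomes
 * the SVM attribute value 0xa9b.
 */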
/* TODO: struct segment needs to be x86 generic, not the VMX-specific one used here */
static void set_svm_segment_from_segment(struct svm_segment *svm_segment,
                                         const struct segment *segment)
{
        u32 ar;

        svm_segment->selector = segment->selector;

        if (segment->access_rights == 0x10000) {
                svm_segment->access_rights = 0;
        } else {
                ar = segment->access_rights;
                svm_segment->access_rights =
                        ((ar & 0xf000) >> 4) | (ar & 0x00ff);
        }

        svm_segment->limit = segment->limit;
        svm_segment->base = segment->base;
}

static bool vcpu_set_cell_config(struct cell *cell, struct vmcb *vmcb)
{
        /* No real need for this function; used for consistency with vmx.c */
        vmcb->iopm_base_pa = paging_hvirt2phys(cell->svm.iopm);
        vmcb->n_cr3 = paging_hvirt2phys(cell->svm.npt_structs.root_table);

        return true;
}

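/*
 * Populate the VMCB with the CPU state Linux had when it handed control to
 * the hypervisor, so that the root cell resumes exactly where the kernel
 * left off once VMRUN is executed for the first time.
 */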
static int vmcb_setup(struct per_cpu *cpu_data)
{
        struct vmcb *vmcb = &cpu_data->vmcb;

        memset(vmcb, 0, sizeof(struct vmcb));

        vmcb->cr0 = read_cr0() & SVM_CR0_CLEARED_BITS;
        vmcb->cr3 = cpu_data->linux_cr3;
        vmcb->cr4 = read_cr4();

        set_svm_segment_from_segment(&vmcb->cs, &cpu_data->linux_cs);
        set_svm_segment_from_segment(&vmcb->ds, &cpu_data->linux_ds);
        set_svm_segment_from_segment(&vmcb->es, &cpu_data->linux_es);
        set_svm_segment_from_segment(&vmcb->fs, &cpu_data->linux_fs);
        set_svm_segment_from_segment(&vmcb->gs, &cpu_data->linux_gs);
        set_svm_segment_from_segment(&vmcb->ss, &invalid_seg);
        set_svm_segment_from_segment(&vmcb->tr, &cpu_data->linux_tss);

        set_svm_segment_from_dtr(&vmcb->ldtr, NULL);
        set_svm_segment_from_dtr(&vmcb->gdtr, &cpu_data->linux_gdtr);
        set_svm_segment_from_dtr(&vmcb->idtr, &cpu_data->linux_idtr);

        vmcb->cpl = 0; /* Linux runs in ring 0 before migration */

        vmcb->rflags = 0x02;
        vmcb->rsp = cpu_data->linux_sp +
                (NUM_ENTRY_REGS + 1) * sizeof(unsigned long);
        vmcb->rip = cpu_data->linux_ip;

        vmcb->sysenter_cs = read_msr(MSR_IA32_SYSENTER_CS);
        vmcb->sysenter_eip = read_msr(MSR_IA32_SYSENTER_EIP);
        vmcb->sysenter_esp = read_msr(MSR_IA32_SYSENTER_ESP);
        vmcb->star = read_msr(MSR_STAR);
        vmcb->lstar = read_msr(MSR_LSTAR);
        vmcb->cstar = read_msr(MSR_CSTAR);
        vmcb->sfmask = read_msr(MSR_SFMASK);
        vmcb->kerngsbase = read_msr(MSR_KERNGS_BASE);

        vmcb->dr6 = 0x00000ff0;
        vmcb->dr7 = 0x00000400;

        /* Make the hypervisor visible */
        vmcb->efer = (cpu_data->linux_efer | EFER_SVME);

        /* Linux uses a custom PAT setting */
        vmcb->g_pat = read_msr(MSR_IA32_PAT);

        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_NMI;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CR0_SEL_WRITE;
        /* TODO: Do we need this for SVM? */
        /* vmcb->general1_intercepts |= GENERAL1_INTERCEPT_CPUID; */
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_IOIO_PROT;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_MSR_PROT;
        vmcb->general1_intercepts |= GENERAL1_INTERCEPT_SHUTDOWN_EVT;

        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMRUN; /* Required */
        vmcb->general2_intercepts |= GENERAL2_INTERCEPT_VMMCALL;

        vmcb->msrpm_base_pa = paging_hvirt2phys(msrpm);

        vmcb->np_enable = 1;
        /* No more than one guest owns the CPU */
        vmcb->guest_asid = 1;

        /* TODO: Setup AVIC */

        return vcpu_set_cell_config(cpu_data->cell, vmcb);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
                                     unsigned long gphys,
                                     unsigned long flags)
{
        return paging_virt2phys(&cpu_data->cell->svm.npt_structs,
                        gphys, flags);
}

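/*
 * Nested page table walks treat guest-physical accesses as user-mode
 * accesses (APMv2, Sect. 15.25.5), so every non-leaf NPT entry must carry
 * the US bit in addition to the default flags.
 */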
static void npt_set_next_pt(pt_entry_t pte, unsigned long next_pt)
{
        /* See APMv2, Section 15.25.5 */
        *pte = (next_pt & 0x000ffffffffff000UL) |
                (PAGE_DEFAULT_FLAGS | PAGE_FLAG_US);
}

int vcpu_vendor_init(void)
{
        unsigned long vm_cr;
        int err, n;

        err = svm_check_features();
        if (err)
                return err;

        vm_cr = read_msr(MSR_VM_CR);
        if (vm_cr & VM_CR_SVMDIS)
                /* SVM disabled in BIOS */
                return -EPERM;

        /* Nested paging is the same as the native one */
        memcpy(npt_paging, x86_64_paging, sizeof(npt_paging));
        for (n = 0; n < NPT_PAGE_DIR_LEVELS; n++)
                npt_paging[n].set_next_pt = npt_set_next_pt;

        /* This is always false for AMD now (except in nested SVM);
           see Sect. 16.3.1 in APMv2 */
        if (using_x2apic) {
                /* allow direct x2APIC access except for ICR writes */
                memset(&msrpm[SVM_MSRPM_0000][MSR_X2APIC_BASE/4], 0,
                                (MSR_X2APIC_END - MSR_X2APIC_BASE + 1)/4);
                msrpm[SVM_MSRPM_0000][MSR_X2APIC_ICR/4] = 0x02;
        } else {
                /* Enable Extended Interrupt LVT */
                apic_reserved_bits[0x50] = 0;
                if (has_avic) {
                        avic_page = page_alloc(&remap_pool, 1);
                        if (!avic_page)
                                return -ENOMEM;
                }
        }

        return vcpu_cell_init(&root_cell);
}

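/*
 * Per-cell setup: allocate the 12-KB I/O permission map (three pages, as
 * SVM requires) and the NPT root table, then map the APIC page into the
 * cell's NPT. Without AVIC the physical xAPIC page is mapped read-only so
 * that reads bypass the hypervisor while writes fault and get emulated;
 * with AVIC the page allocated in vcpu_vendor_init() is mapped instead.
 */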
int vcpu_vendor_cell_init(struct cell *cell)
{
        u64 flags;
        int err;

        /* allocate iopm (two 4-K pages + 3 bits) */
        cell->svm.iopm = page_alloc(&mem_pool, 3);
        if (!cell->svm.iopm)
                return -ENOMEM;

        /* build root NPT of cell */
        cell->svm.npt_structs.root_paging = npt_paging;
        cell->svm.npt_structs.root_table = page_alloc(&mem_pool, 1);
        if (!cell->svm.npt_structs.root_table)
                return -ENOMEM;

        if (!has_avic) {
                /*
                 * Map xAPIC as is; reads are passed, writes are trapped.
                 */
                flags = PAGE_READONLY_FLAGS |
                        PAGE_FLAG_US |
                        PAGE_FLAG_UNCACHED;
                err = paging_create(&cell->svm.npt_structs, XAPIC_BASE,
                                    PAGE_SIZE, XAPIC_BASE,
                                    flags,
                                    PAGING_NON_COHERENT);
        } else {
                flags = PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED;
                err = paging_create(&cell->svm.npt_structs,
                                    paging_hvirt2phys(avic_page),
                                    PAGE_SIZE, XAPIC_BASE,
                                    flags,
                                    PAGING_NON_COHERENT);
        }

        return err;
}

int vcpu_map_memory_region(struct cell *cell,
                           const struct jailhouse_memory *mem)
{
        u64 phys_start = mem->phys_start;
        u32 flags = PAGE_FLAG_US; /* See APMv2, Section 15.25.5 */

        if (mem->flags & JAILHOUSE_MEM_READ)
                flags |= PAGE_FLAG_PRESENT;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= PAGE_FLAG_RW;
        if (mem->flags & JAILHOUSE_MEM_EXECUTE)
                flags |= PAGE_FLAG_EXECUTE;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);

        return paging_create(&cell->svm.npt_structs, phys_start, mem->size,
                             mem->virt_start, flags, PAGING_NON_COHERENT);
}

int vcpu_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
{
        return paging_destroy(&cell->svm.npt_structs, mem->virt_start,
                              mem->size, PAGING_NON_COHERENT);
}

void vcpu_vendor_cell_exit(struct cell *cell)
{
        paging_destroy(&cell->svm.npt_structs, XAPIC_BASE, PAGE_SIZE,
                       PAGING_NON_COHERENT);
        page_free(&mem_pool, cell->svm.npt_structs.root_table, 1);
}

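/*
 * Per-CPU activation: SVM is enabled by setting EFER.SVME, and
 * MSR_VM_HSAVE_PA must point to a page where the processor saves host
 * state across VMRUN/#VMEXIT. Finding SVME already set means another
 * hypervisor owns this CPU, hence the -EBUSY.
 */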
int vcpu_init(struct per_cpu *cpu_data)
{
        unsigned long efer;
        int err;

        err = svm_check_features();
        if (err)
                return err;

        efer = read_msr(MSR_EFER);
        if (efer & EFER_SVME)
                return -EBUSY;

        efer |= EFER_SVME;
        write_msr(MSR_EFER, efer);

        cpu_data->svm_state = SVMON;

        if (!vmcb_setup(cpu_data))
                return -EIO;

        write_msr(MSR_VM_HSAVE_PA, paging_hvirt2phys(cpu_data->host_state));

        /* Enable Extended Interrupt LVT (xAPIC, as it is AMD-only) */
        if (!using_x2apic)
                apic_reserved_bits[0x50] = 0;

        return 0;
}

void vcpu_exit(struct per_cpu *cpu_data)
{
        unsigned long efer;

        if (cpu_data->svm_state == SVMOFF)
                return;

        cpu_data->svm_state = SVMOFF;

        efer = read_msr(MSR_EFER);
        efer &= ~EFER_SVME;
        write_msr(MSR_EFER, efer);

        write_msr(MSR_VM_HSAVE_PA, 0);
}

void vcpu_activate_vmm(struct per_cpu *cpu_data)
{
        /* TODO: Implement */
        __builtin_unreachable();
}

void __attribute__((noreturn))
vcpu_deactivate_vmm(struct registers *guest_regs)
{
        /* TODO: Implement */
        __builtin_unreachable();
}

void vcpu_skip_emulated_instruction(unsigned int inst_len)
{
        /* TODO: Implement */
}

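/*
 * Derive the guest's current paging mode from the VMCB so that generic
 * code can walk the guest page tables, e.g. to fetch instruction bytes or
 * resolve MMIO accesses: long mode (EFER.LMA), legacy 32-bit paging
 * (CR0.PG set, CR4.PAE clear) and unpaged/real mode are handled; PAE
 * paging is not supported yet.
 */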
bool vcpu_get_guest_paging_structs(struct guest_paging_structures *pg_structs)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;

        if (vmcb->efer & EFER_LMA) {
                pg_structs->root_paging = x86_64_paging;
                pg_structs->root_table_gphys =
                        vmcb->cr3 & 0x000ffffffffff000UL;
        } else if ((vmcb->cr0 & X86_CR0_PG) &&
                   !(vmcb->cr4 & X86_CR4_PAE)) {
                pg_structs->root_paging = i386_paging;
                pg_structs->root_table_gphys =
                        vmcb->cr3 & 0xfffff000UL;
        } else if (!(vmcb->cr0 & X86_CR0_PG)) {
                /*
                 * The guest can be in non-paged protected mode as well,
                 * but the translation mechanism stays the same anyway.
                 */
                pg_structs->root_paging = realmode_paging;
                /*
                 * This will make paging_get_guest_pages map the page
                 * that also contains the bootstrap code and, thus, is
                 * always present in a cell.
                 */
                pg_structs->root_table_gphys = 0xff000;
        } else {
                printk("FATAL: Unsupported paging mode\n");
                return false;
        }
        return true;
}

void vcpu_handle_exit(struct registers *guest_regs, struct per_cpu *cpu_data)
{
        /* TODO: Implement */
}

void vcpu_park(struct per_cpu *cpu_data)
{
        /* TODO: Implement */
}

void vcpu_nmi_handler(struct per_cpu *cpu_data)
{
        /* TODO: Implement */
}

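/*
 * Request a TLB flush on the next VMRUN. With flush-by-ASID support only
 * the entries tagged with this guest's ASID are dropped; otherwise the
 * whole TLB has to be flushed.
 */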
void vcpu_tlb_flush(void)
{
        struct per_cpu *cpu_data = this_cpu_data();
        struct vmcb *vmcb = &cpu_data->vmcb;

        if (has_flush_by_asid)
                vmcb->tlb_control = SVM_TLB_FLUSH_GUEST;
        else
                vmcb->tlb_control = SVM_TLB_FLUSH_ALL;
}

const u8 *vcpu_get_inst_bytes(const struct guest_paging_structures *pg_structs,
                              unsigned long pc, unsigned int *size)
{
        /* TODO: Implement */
        return NULL;
}

void vcpu_vendor_get_cell_io_bitmap(struct cell *cell,
                                    struct vcpu_io_bitmap *iobm)
{
        iobm->data = cell->svm.iopm;
        /* iopm spans three pages, see vcpu_vendor_cell_init() */
        iobm->size = 3 * PAGE_SIZE;
}

void vcpu_vendor_get_execution_state(struct vcpu_execution_state *x_state)
{
        /* TODO: Implement */
}