class Vm_svm : public Vm
  static void resume_vm_svm(Mword phys_vmcb, Mword *regs)
    asm("resume_vm_svm") __attribute__((__regparm__(3)));
  Unsigned8 _asid[Config::Max_num_cpus];
  Unsigned32 _asid_generation[Config::Max_num_cpus];

// ------------------------------------------------------------------------
INTERFACE [svm && debug]:

EXTENSION class Vm_svm
  struct Log_vm_svm_exit
    Mword exitcode, exitinfo1, exitinfo2, rip;

  static unsigned log_fmt_svm(Tb_entry *, int max, char *buf) asm ("__fmt_vm_svm_exit");

// ------------------------------------------------------------------------

#include "mem_space.h"
#include "thread.h"        // XXX: circular dependency, move this out of here!
#include "thread_state.h"  // XXX: circular dependency, move this out of here!
// ------------------------------------------------------------------------
IMPLEMENTATION [svm && ia32]:

PRIVATE inline NEEDS["virt.h"]
Vm_svm::get_vm_cr3(Vmcb *)
  // In 32bit mode we always return the page-table of our Vm object,
  // whether we're running with shadow or nested paging.
  return mem_space()->phys_dir();
//----------------------------------------------------------------------------
IMPLEMENTATION [svm && amd64]:

PRIVATE inline NEEDS["virt.h"]
Vm_svm::get_vm_cr3(Vmcb *v)
  // When we have nested paging, we just return the 4lvl host page-table of
    return mem_space()->phys_dir();

  // When running with shadow paging and the guest is running in long mode
  // and has paging enabled, we can just return the 4lvl page table of our
  if ((v->state_save_area.efer & EFER_LME)
      && (v->state_save_area.cr0 & CR0_PG))
    return mem_space()->phys_dir();
  // Now it's getting tricky when running with shadow paging.
  // We need to obey the following rules:
  //   - When the guest is not running in 64bit mode, the CR3 value one can
  //     set for the page-table must lie below 4G physical memory (i.e. bits
  //     32-63 must be zero). This is unfortunate when the host has memory
  //     above 4G, as Fiasco gets its memory from the end of physical memory,
  //     i.e. page-table memory is above 4G.
  //   - We need an appropriate page-table format for 32bit!
  //     That means either a 2lvl page-table or a 3lvl PAE one. That would
  //     require maintaining two page-tables for the guest, one for 32bit
  //     mode execution and one for 64bit execution. This is needed either
  //     for the transition from real mode to long mode via protected mode
  //     or for 32bit-only guests.
  //   There's one trick to avoid having two PTs: 4lvl-PTs and 3lvl-PAE-PTs
  //   have much in common, so it's possible to just take the PDPE level of
  //   the host as the 3lvl-PAE-PT for the guest. Well, not quite.
  //   The problem is that SVM checks that the MBZ bits in the PAE-PT
  //   entries are really 0, as written in the spec. The 4lvl PT contains
  //   rights bits there, so this type of PT is refused and does not work
  //   on real hardware.
  //   So why is the code still here? Well, QEMU isn't so picky about the
  //   bits in the PDPE and it thus works there...
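  // Use the physical address stored in the host's top-level entry for
  // virtual address 0 (i.e. the table covering the lowest part of the
  // address space) as the guest's 3lvl-PAE root.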
  Address vm_cr3 = mem_space()->dir()->walk(Virt_addr(0), 0).e->addr();
  if (EXPECT_FALSE(!vm_cr3))
      // force allocation of new secondary page-table level
      mem_space()->dir()->alloc_cast<Mem_space_q_alloc>()
        ->walk(Virt_addr(0), 1, Mem_space_q_alloc(ram_quota(),
               Mapped_allocator::allocator()));
      vm_cr3 = mem_space()->dir()->walk(Virt_addr(0), 0).e->addr();

  if (EXPECT_FALSE(vm_cr3 >= 1UL << 32))
      WARN("svm: Host page-table not under 4G, sorry.\n");
//----------------------------------------------------------------------------
IMPLEMENTATION [svm]:
  return _asid[current_cpu()];

Vm_svm::asid (Unsigned8 asid)
  _asid[current_cpu()] = asid;

Vm_svm::asid_generation ()
  return _asid_generation[current_cpu()];

Vm_svm::asid_generation (Unsigned32 generation)
  _asid_generation[current_cpu()] = generation;
Vm_svm::Vm_svm(Ram_quota *q)
  memset(_asid, 0, sizeof(_asid));
  memset(_asid_generation, 0, sizeof(_asid_generation));

Vm_svm::operator new (size_t size, void *p)
  assert (size == sizeof (Vm_svm));

Vm_svm::operator delete (void *ptr)
  Vm_svm *t = reinterpret_cast<Vm_svm*>(ptr);
  allocator<Vm_svm>()->q_free(t->ram_quota(), ptr);
// - force fpu ownership
// - debug registers not covered by VMCB
Vm_svm::copy_state_save_area(Vmcb *dest, Vmcb *src)
  Vmcb_state_save_area *d = &dest->state_save_area;
  Vmcb_state_save_area *s = &src->state_save_area;

  d->es_sel = s->es_sel;
  d->es_attrib = s->es_attrib;
  d->es_limit = s->es_limit;
  d->es_base = s->es_base;

  d->cs_sel = s->cs_sel;
  d->cs_attrib = s->cs_attrib;
  d->cs_limit = s->cs_limit;
  d->cs_base = s->cs_base;

  d->ss_sel = s->ss_sel;
  d->ss_attrib = s->ss_attrib;
  d->ss_limit = s->ss_limit;
  d->ss_base = s->ss_base;

  d->ds_sel = s->ds_sel;
  d->ds_attrib = s->ds_attrib;
  d->ds_limit = s->ds_limit;
  d->ds_base = s->ds_base;

  d->fs_sel = s->fs_sel;
  d->fs_attrib = s->fs_attrib;
  d->fs_limit = s->fs_limit;
  d->fs_base = s->fs_base;

  d->gs_sel = s->gs_sel;
  d->gs_attrib = s->gs_attrib;
  d->gs_limit = s->gs_limit;
  d->gs_base = s->gs_base;

  d->gdtr_sel = s->gdtr_sel;
  d->gdtr_attrib = s->gdtr_attrib;
  d->gdtr_limit = s->gdtr_limit;
  d->gdtr_base = s->gdtr_base;

  d->ldtr_sel = s->ldtr_sel;
  d->ldtr_attrib = s->ldtr_attrib;
  d->ldtr_limit = s->ldtr_limit;
  d->ldtr_base = s->ldtr_base;

  d->idtr_sel = s->idtr_sel;
  d->idtr_attrib = s->idtr_attrib;
  d->idtr_limit = s->idtr_limit;
  d->idtr_base = s->idtr_base;

  d->tr_sel = s->tr_sel;
  d->tr_attrib = s->tr_attrib;
  d->tr_limit = s->tr_limit;
  d->tr_base = s->tr_base;

  d->rflags = s->rflags;

  d->sfmask = s->sfmask;
  d->kernelgsbase = s->kernelgsbase;
  d->sysenter_cs = s->sysenter_cs;
  d->sysenter_esp = s->sysenter_esp;
  d->sysenter_eip = s->sysenter_eip;

  d->dbgctl = s->dbgctl;
  d->br_from = s->br_from;
  d->lastexcpfrom = s->lastexcpfrom;
  d->last_excpto = s->last_excpto;
Vm_svm::copy_control_area(Vmcb *dest, Vmcb *src)
  Vmcb_control_area *d = &dest->control_area;
  Vmcb_control_area *s = &src->control_area;

  d->intercept_rd_crX = s->intercept_rd_crX;
  d->intercept_wr_crX = s->intercept_wr_crX;

  d->intercept_rd_drX = s->intercept_rd_drX;
  d->intercept_wr_drX = s->intercept_wr_drX;

  d->intercept_exceptions = s->intercept_exceptions;

  d->intercept_instruction0 = s->intercept_instruction0;
  d->intercept_instruction1 = s->intercept_instruction1;

  // skip iopm_base_pa and msrpm_base_pa

  d->tsc_offset = s->tsc_offset;
  d->guest_asid_tlb_ctl = s->guest_asid_tlb_ctl;
  d->interrupt_ctl = s->interrupt_ctl;
  d->interrupt_shadow = s->interrupt_shadow;
  d->exitcode = s->exitcode;
  d->exitinfo1 = s->exitinfo1;
  d->exitinfo2 = s->exitinfo2;
  d->exitintinfo = s->exitintinfo;
  d->np_enable = s->np_enable;

  d->eventinj = s->eventinj;

  d->lbr_virtualization_enable = s->lbr_virtualization_enable;

/* skip anything that does not change */
Vm_svm::copy_control_area_back(Vmcb *dest, Vmcb *src)
  Vmcb_control_area *d = &dest->control_area;
  Vmcb_control_area *s = &src->control_area;

  d->interrupt_ctl = s->interrupt_ctl;
  d->interrupt_shadow = s->interrupt_shadow;

  d->exitcode = s->exitcode;
  d->exitinfo1 = s->exitinfo1;
  d->exitinfo2 = s->exitinfo2;
  d->exitintinfo = s->exitintinfo;

  d->eventinj = s->eventinj;
/** \brief Choose an ASID for this Vm.
 * Choose an ASID for this Vm. The ASID provided by userspace is ignored;
 * instead the kernel picks one.
 * Userspace uses the flush-bit to request a new ASID for this Vm.
 * All ASIDs are flushed as soon as the kernel runs out of ASIDs.
 * @param vmcb_s        external VMCB provided by userspace
 * @param kernel_vmcb_s our VMCB
Vm_svm::configure_asid (Vmcb *vmcb_s, Vmcb *kernel_vmcb_s)
  assert (cpu_lock.test());

  Svm &s = Svm::cpus.cpu(current_cpu());

  if (// vmm requests flush
      ((vmcb_s->control_area.guest_asid_tlb_ctl >> 32) & 1) == 1 ||
      // our asid is not valid or expired
      !(s.asid_valid(asid(), asid_generation())))
      asid_generation(s.global_asid_generation());

  assert(s.asid_valid(asid(), asid_generation()));

  kernel_vmcb_s->control_area.guest_asid_tlb_ctl = asid();
  if (s.flush_all_asids())
      kernel_vmcb_s->control_area.guest_asid_tlb_ctl |= (1ULL << 32);
      s.flush_all_asids(false);

  kernel_vmcb_s->control_area.guest_asid_tlb_ctl = 1;
  kernel_vmcb_s->control_area.guest_asid_tlb_ctl |= (1ULL << 32);
Vm_svm::sys_vm_run(Syscall_frame *f, Utcb *utcb)
  Unsigned64 orig_cr3, orig_ncr3;

  assert (cpu_lock.test());
  /* these four selectors must not use LDT entries */
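  /* (Bit 2 of a selector is the table-indicator bit: if set, the selector
   * refers to the LDT rather than the GDT. Host LDT state is handled
   * manually around VMRUN (see the ldtr save below), so the selectors in
   * use here must come from the GDT.) */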
  assert (!(Cpu::get_cs() & (1 << 2)));
  assert (!(Cpu::get_ss() & (1 << 2)));
  assert (!(Cpu::get_ds() & (1 << 2)));
  assert (!(Cpu::get_es() & (1 << 2)));
  Svm &s = Svm::cpus.cpu(current_cpu());

  L4_msg_tag const &tag = f->tag();

  if (EXPECT_FALSE(!s.svm_enabled()))
      WARN("svm: not supported/enabled\n");
      return commit_result(-L4_err::EInval);

  if (EXPECT_FALSE(tag.words() < 1 + Svm::Gpregs_words))
      WARN("svm: Invalid message length\n");
      return commit_result(-L4_err::EInval);

  L4_snd_item_iter vmcb_item(utcb, tag.words());

  if (EXPECT_FALSE(!tag.items() || !vmcb_item.next()))
    return commit_result(-L4_err::EInval);

  L4_fpage vmcb_fpage(vmcb_item.get()->d);

  if (EXPECT_FALSE(!vmcb_fpage.is_mempage()))
      WARN("svm: Fpage invalid\n");
      return commit_error(utcb, L4_error::Overflow);

  if (EXPECT_FALSE(vmcb_fpage.order() < 12))
    return commit_result(-L4_err::EInval);

  Vmcb *vmcb_s = (Vmcb *)(Virt_addr(vmcb_fpage.mem_address()).value());
  Vmcb *kernel_vmcb_s = s.kernel_vmcb();

  if (EXPECT_FALSE(vmcb_s->np_enabled() && !s.has_npt()))
      WARN("svm: No NPT available\n");
      return commit_result(-L4_err::EInval);

  Address vm_cr3 = get_vm_cr3(vmcb_s);
  // can only fail on 64bit, will be optimized away on 32bit
  if (EXPECT_FALSE(is_64bit() && !vm_cr3))
    return commit_result(-L4_err::ENomem);

  Mem_space::Phys_addr phys_vmcb;
  Mem_space::Size size;
  unsigned int page_attribs;

  Mem_space *const curr_mem_space = current()->space()->mem_space();
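  // Make sure the user-supplied VMCB is actually mapped in the caller's
  // address space and determine its physical address.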
  resident = curr_mem_space->v_lookup(Virt_addr(vmcb_s), &phys_vmcb, &size,
                                      &page_attribs);

      WARN("svm: VMCB invalid\n");
      return commit_result(-L4_err::EInval);

  // Currently only nested page tables are supported. If shadow page tables
  // are to be allowed, cr0 needs further scrutiny and cr3 must not be
  // accessible.
  if ((vmcb_s->control_area.np_enable & 1) != 1)
    return commit_result(-L4_err::EInval);
  // neither EFER.LME nor EFER.LMA must be set
  if (EXPECT_FALSE(!is_64bit()
                   && (vmcb_s->state_save_area.efer & (EFER_LME | EFER_LMA))))
      WARN("svm: EFER invalid %llx\n", vmcb_s->state_save_area.efer);
      return commit_result(-L4_err::EInval);

  // EFER.SVME must be set
  if (!(vmcb_s->state_save_area.efer & 0x1000))
      WARN("svm: EFER invalid %llx\n", vmcb_s->state_save_area.efer);
      return commit_result(-L4_err::EInval);
  // allow PAE in combination with NPT

  // CR4.PAE must be clear
  if (vmcb_s->state_save_area.cr4 & 0x20)
    return commit_result(-L4_err::EInval);
  // This generates a circular dependency between thread and task; this
  // cries for a new abstraction...
  if (!(current()->state() & Thread_fpu_owner))
      if (!current_thread()->switchin_fpu())
          WARN("svm: switchin_fpu failed\n");
          return commit_result(-L4_err::EInval);
#if 0 // should never happen
  host_cr0 = Cpu::get_cr0();
  // the VMM does not currently own the fpu but wants to
  // make it available for the guest. This may happen
  // if it was descheduled between activating the fpu and
  // executing the vm_run operation
  if (!(vmcb_s->state_save_area.cr0 & 0x8) && (host_cr0 & 0x8))
      WARN("svm: FPU TS\n");
      return commit_result(-L4_err::EInval);
  // increment our refcount, and drop it at the end automatically
  Ref_ptr<Vm_svm> pin_myself(this);

  orig_cr3 = vmcb_s->state_save_area.cr3;
  orig_ncr3 = vmcb_s->control_area.n_cr3;

  copy_control_area(kernel_vmcb_s, vmcb_s);
  copy_state_save_area(kernel_vmcb_s, vmcb_s);

  if (EXPECT_FALSE(is_64bit() && !kernel_vmcb_s->np_enabled()
                   && (kernel_vmcb_s->state_save_area.cr0 & CR0_PG)
                   && !(kernel_vmcb_s->state_save_area.cr4 & CR4_PAE)))
      WARN("svm: No 32bit shadow page-tables on AMD64, use PAE!\n");
      return commit_result(-L4_err::EInval);

  // set MCE according to host
  kernel_vmcb_s->state_save_area.cr4 |= Cpu::get_cr4() & CR4_MCE;

  // allow w access to cr0, cr2, cr3
  // allow r access to cr0, cr2, cr3, cr4
  // to do: check if enabling PAE in cr4 needs to be controlled

  // allow r/w access to dr[0-7]
  kernel_vmcb_s->control_area.intercept_rd_drX |= 0xff00;
  kernel_vmcb_s->control_area.intercept_wr_drX |= 0xff00;

  // intercept exception vectors 0-31
  kernel_vmcb_s->control_area.intercept_exceptions = 0xffffffff;

  // enable iopm and msrpm
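  // (bits 27 and 28 of the first intercept vector: IOIO_PROT and MSR_PROT)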
  kernel_vmcb_s->control_area.intercept_instruction0 |= 0x18000000;
  // intercept FERR_FREEZE and shutdown events
  kernel_vmcb_s->control_area.intercept_instruction0 |= 0xc0000000;
  // intercept INTR/NMI/SMI/INIT
  kernel_vmcb_s->control_area.intercept_instruction0 |= 0xf;
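  // intercept INVD (bit 22)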
  kernel_vmcb_s->control_area.intercept_instruction0 |= (1 << 22);
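  // intercept HLT (bit 24)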
  kernel_vmcb_s->control_area.intercept_instruction0 |= (1 << 24);
  // intercept task switch
  kernel_vmcb_s->control_area.intercept_instruction0 |= (1 << 29);
  // intercept shutdown
  kernel_vmcb_s->control_area.intercept_instruction0 |= (1 << 31);
  // intercept MONITOR/MWAIT
  kernel_vmcb_s->control_area.intercept_instruction1 |= (1 << 10) | (1 << 11);

  // intercept virtualization related instructions
  // vmrun interception is required by the hardware
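  // (bits 0-7: VMRUN, VMMCALL, VMLOAD, VMSAVE, STGI, CLGI, SKINIT, RDTSCP)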
  kernel_vmcb_s->control_area.intercept_instruction1 |= 0xff;

  Mword kernel_vmcb_pa = s.kernel_vmcb_pa();
  Unsigned64 iopm_base_pa = s.iopm_base_pa();
  Unsigned64 msrpm_base_pa = s.msrpm_base_pa();

  kernel_vmcb_s->control_area.iopm_base_pa = iopm_base_pa;
  kernel_vmcb_s->control_area.msrpm_base_pa = msrpm_base_pa;

  configure_asid(vmcb_s, kernel_vmcb_s);

  // 7:0 V_TPR, 8 V_IRQ, 15:9 reserved SBZ,
  // 19:16 V_INTR_PRIO, 20 V_IGN_TPR, 23:21 reserved SBZ
  // 24 V_INTR_MASKING, 31:25 reserved SBZ
  // 39:32 V_INTR_VECTOR, 63:40 reserved SBZ
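  // 0x10f0000 sets V_INTR_PRIO to 0xf and already includes V_INTR_MASKING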
  kernel_vmcb_s->control_area.interrupt_ctl = 0x10f0000;

  // enable IRQ masking virtualization
  kernel_vmcb_s->control_area.interrupt_ctl |= 0x01000000;

  // 0 INTERRUPT_SHADOW, 31:1 reserved SBZ
  // 63:32 reserved SBZ
  kernel_vmcb_s->control_area.interrupt_shadow = 0;

  kernel_vmcb_s->control_area.exitcode = 0;
  kernel_vmcb_s->control_area.exitinfo1 = 0;
  kernel_vmcb_s->control_area.exitinfo2 = 0;
  kernel_vmcb_s->control_area.exitintinfo = 0;

  // 0 NP_ENABLE, 31:1 reserved SBZ
  kernel_vmcb_s->control_area.np_enable = 1;

  // 31 VALID, EVENTINJ
  kernel_vmcb_s->control_area.eventinj = 0;

  kernel_vmcb_s->control_area.n_cr3 = vm_cr3;

  if (!kernel_vmcb_s->np_enabled())
      // to do: check that the vmtask has the
      // VM property set, i.e. does not contain mappings
      // to the fiasco kernel regions or runs with PL 3

      // printf("nested paging disabled, use n_cr3 as cr3\n");
      kernel_vmcb_s->state_save_area.cr3 = vm_cr3;

      // intercept accesses to cr0, cr3 and cr4
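      // (0xfff9 intercepts all control registers except cr1 and cr2)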
      kernel_vmcb_s->control_area.intercept_rd_crX = 0xfff9;
      kernel_vmcb_s->control_area.intercept_wr_crX = 0xfff9;

  kernel_vmcb_s->control_area.lbr_virtualization_enable = 0;
  // - initialize VM_HSAVE_PA (done)
  // - supply trusted msrpm_base_pa and iopm_base_pa (done)
  // - save host state not covered by VMRUN/VMEXIT (ldt, some segments etc.) (done)
  // - disable interrupts (done)
  // - trigger intercepted device and timer interrupts (done, not necessary)
  // - check host CR0.TS (floating point registers) (done)
  Unsigned64 sysenter_cs, sysenter_eip, sysenter_esp;

  sysenter_cs = Cpu::rdmsr(MSR_SYSENTER_CS);
  sysenter_eip = Cpu::rdmsr(MSR_SYSENTER_EIP);
  sysenter_esp = Cpu::rdmsr(MSR_SYSENTER_ESP);

  ldtr = Cpu::get_ldt();

  tr_entry = (*Cpu::cpus.cpu(current_cpu()).get_gdt())[tr / 8];

  // to do: check if the nested page table walker looks
  // into the TLB. If so, global pages have to be disabled in
  cr4 = Cpu::get_cr4();

  // disable support for global pages as the vm task has
  // a divergent upper memory region from the regular tasks
  Cpu::set_cr4(cr4 & ~CR4_PGE);
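  // Enter the guest via the resume_vm_svm assembly stub declared above: it
  // takes the physical address of the VMCB and the guest GPR block in the
  // UTCB and returns here after the next #VMEXIT.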
  resume_vm_svm(kernel_vmcb_pa, &utcb->values[1]);
  Cpu::wrmsr(sysenter_cs, MSR_SYSENTER_CS);
  Cpu::wrmsr(sysenter_eip, MSR_SYSENTER_EIP);
  Cpu::wrmsr(sysenter_esp, MSR_SYSENTER_ESP);
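  // Reload the host TR: LTR only accepts an available (non-busy) TSS, so
  // clear the busy bit (bit 1 of the access byte) in the GDT descriptor
  // before setting TR again.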
  tss_entry = (*Cpu::cpus.cpu(current_cpu()).get_gdt())[tr / 8];
  tss_entry.access &= 0xfd;
  (*Cpu::cpus.cpu(current_cpu()).get_gdt())[tr / 8] = tss_entry;

  Cpu::set_tr(tr); // TODO move under stgi in asm
  copy_state_save_area(vmcb_s, kernel_vmcb_s);
  copy_control_area_back(vmcb_s, kernel_vmcb_s);

  if (!(vmcb_s->np_enabled()))
    vmcb_s->state_save_area.cr3 = orig_cr3;

  vmcb_s->control_area.n_cr3 = orig_ncr3;

  LOG_TRACE("VM-SVM", "svm", current(), __fmt_vm_svm_exit,
            Log_vm_svm_exit *l = tbe->payload<Log_vm_svm_exit>();
            l->exitcode = vmcb_s->control_area.exitcode;
            l->exitinfo1 = vmcb_s->control_area.exitinfo1;
            l->exitinfo2 = vmcb_s->control_area.exitinfo2;
            l->rip = vmcb_s->state_save_area.rip;

  return commit_result(L4_error::None);
Vm_svm::invoke(L4_obj_ref obj, Mword rights, Syscall_frame *f, Utcb *utcb)
  vm_invoke<Vm_svm>(obj, rights, f, utcb);

// ------------------------------------------------------------------------
IMPLEMENTATION [svm && debug]:

Vm_svm::log_fmt_svm(Tb_entry *e, int max, char *buf)
  Log_vm_svm_exit *l = e->payload<Log_vm_svm_exit>();
  return snprintf(buf, max, "ec=%lx ei1=%08lx ei2=%08lx rip=%08lx",
                  l->exitcode, l->exitinfo1, l->exitinfo2, l->rip);