7 //-----------------------------------------------------------------------------
10 #include "per_cpu_data.h"
// Per-CPU SVM driver instances; defined via DEFINE_PER_CPU further below.
17 static Per_cpu<Svm> cpus;
// ASID (address-space ID) allocation state: next ASID to hand out and the
// current generation. NOTE(review): the rollover policy lives in the ASID
// allocator near the end of this excerpt — confirm against the full file.
31 Unsigned32 _next_asid;
32 Unsigned32 _global_asid_generation;
// When set, all ASIDs must be flushed before reuse (see flush_all_asids()).
34 bool _flush_all_asids;
// Physical addresses of the I/O and MSR permission bitmaps referenced by VMCBs.
37 Unsigned64 _iopm_base_pa;
38 Unsigned64 _msrpm_base_pa;
// Physical address of the kernel's own VMCB.
40 Address _kernel_vmcb_pa;
43 //-----------------------------------------------------------------------------
44 INTERFACE [svm && ia32]:
// Number of machine words reserved for guest GP-register state on 32-bit.
49 enum { Gpregs_words = 10 };
52 //-----------------------------------------------------------------------------
53 INTERFACE [svm && amd64]:
// 64-bit needs more slots (16 GPRs plus extra state).
// NOTE(review): exact layout is defined by the VM entry/exit code — confirm.
58 enum { Gpregs_words = 18 };
61 // -----------------------------------------------------------------------
// One Svm instance per CPU. NOTE(review): the 'true' argument presumably
// selects per-CPU (late) construction — confirm against Per_cpu's definition.
70 DEFINE_PER_CPU Per_cpu<Svm> Svm::cpus(true);
// Per-CPU constructor: probes and enables SVM on this CPU, then sets up the
// I/O permission map, MSR permission map, kernel VMCB and host state-save
// area. NOTE(review): this is a gappy excerpt — several statements between
// the visible lines (e.g. setting EFER.SVME before the write-back, storing
// _max_asid from the CPUID result) are not shown; confirm in the full file.
73 Svm::Svm(unsigned cpu)
75 Cpu &c = Cpu::cpus.cpu(cpu);
78 _global_asid_generation = 0;
80 _flush_all_asids = true;
86 Unsigned64 efer, vmcr;
// VM_CR.SVMDIS set means firmware locked SVM off; we can only report it.
88 vmcr = c.rdmsr(MSR_VM_CR);
89 if (vmcr & (1 << 4)) // VM_CR.SVMDIS
91 printf("SVM supported but locked.\n");
95 printf("Enabling SVM support\n");
// Read-modify-write of EFER; the modification (presumably setting EFER.SVME)
// is on a line not visible in this excerpt — confirm. Note this Cpu API takes
// wrmsr(value, msr) in that argument order.
97 efer = c.rdmsr(MSR_EFER);
99 c.wrmsr(efer, MSR_EFER);
// CPUID leaf 0x8000000a: SVM feature bits; EBX reports the number of ASIDs.
101 Unsigned32 eax, ebx, ecx, edx;
102 c.cpuid (0x8000000a, &eax, &ebx, &ecx, &edx);
105 printf("Nested Paging supported\n");
108 printf("NASID: 0x%x\n", ebx);
111 // FIXME: MUST NOT PANIC ON CPU HOTPLUG
112 assert(_max_asid > 0);
// Buffer sizes (bytes) for the permission maps and the host save area.
118 Msr_pm_size = 0x2000,
119 State_save_area_size = 0x1000,
122 /* 16kB IO permission map and Vmcb (16kB are good for the buddy allocator)*/
123 // FIXME: MUST NOT PANIC ON CPU HOTPLUG
124 check(_iopm = Kmem_alloc::allocator()->unaligned_alloc(Io_pm_size + Vmcb_size));
125 _iopm_base_pa = Kmem::virt_to_phys(_iopm);
// The VMCB lives directly behind the I/O permission map in the same chunk.
126 _kernel_vmcb = (Vmcb*)((char*)_iopm + Io_pm_size);
127 _kernel_vmcb_pa = Kmem::virt_to_phys(_kernel_vmcb);
130 /* disable all ports */
131 memset(_iopm, ~0, Io_pm_size);
134 memset(_kernel_vmcb, 0, Vmcb_size);
136 /* 8kB MSR permission map */
137 // FIXME: MUST NOT PANIC ON CPU HOTPLUG
138 check(_msrpm = Kmem_alloc::allocator()->unaligned_alloc(Msr_pm_size));
139 _msrpm_base_pa = Kmem::virt_to_phys(_msrpm);
// All bits set: intercept every MSR access by default, then open selectively.
140 memset(_msrpm, ~0, Msr_pm_size);
142 // allow the sysenter MSRs for the guests
143 set_msr_perm(MSR_SYSENTER_CS, Msr_rw);
144 set_msr_perm(MSR_SYSENTER_EIP, Msr_rw);
145 set_msr_perm(MSR_SYSENTER_ESP, Msr_rw);
147 /* 4kB Host state-save area */
148 // FIXME: MUST NOT PANIC ON CPU HOTPLUG
149 check(_vm_hsave_area = Kmem_alloc::allocator()->unaligned_alloc(State_save_area_size));
150 Unsigned64 vm_hsave_pa = Kmem::virt_to_phys(_vm_hsave_area);
// Tell the CPU where to save host state across VMRUN.
152 c.wrmsr(vm_hsave_pa, MSR_VM_HSAVE_PA);
// Set the guest access permission for a single MSR in the MSR permission map
// (2 bits per MSR). NOTE(review): gappy excerpt — the first range check
// (presumably 0x0-0x1fff) and the computation of 'offs' are on lines not
// visible here; confirm against the full file.
157 Svm::set_msr_perm(Unsigned32 msr, Msr_perms perms)
// The two high MSR ranges map to separate chunks of the permission map.
162 else if (0xc0000000 <= msr && msr <= 0xc0001fff)
164 else if (0xc0010000 <= msr && msr <= 0xc0011fff)
// MSRs outside the mappable ranges cannot be granted; warn and bail out.
168 WARN("Illegal MSR %x\n", msr);
175 unsigned char *pm = (unsigned char *)_msrpm;
// Each byte holds permissions for 4 MSRs, 2 bits each.
177 unsigned shift = (msr & 3) * 2;
178 pm[offs] = (pm[offs] & ~(3 << shift)) | ((unsigned char)perms << shift);
// Trivial accessors (their signatures/return types are on lines not shown).
// Physical address of the I/O permission map.
184 { return _iopm_base_pa; }
// Physical address of the MSR permission map.
189 { return _msrpm_base_pa; }
// Virtual address of the kernel's VMCB.
194 { return _kernel_vmcb; }
198 Svm::kernel_vmcb_pa()
// Physical address of the kernel's VMCB.
199 { return _kernel_vmcb_pa; }
// Whether SVM was successfully enabled on this CPU.
204 { return _svm_enabled; }
// Check whether a cached (asid, generation) pair may still be used.
// Valid iff the ASID lies within (0, _max_asid] AND was allocated in the
// CURRENT generation: a generation bump means the ASID space rolled over
// and every previously handed-out ASID may have been reassigned.
// Fix: the comparison must be '==', not '<='. _global_asid_generation only
// ever increases (see the allocator below), so 'generation <=
// _global_asid_generation' holds for every ASID ever allocated, making the
// staleness check vacuous and allowing reuse of recycled ASIDs (stale guest
// TLB mappings surviving a rollover).
213 Svm::asid_valid (Unsigned32 asid, Unsigned32 generation)
215 return ((asid > 0) &&
216 (asid <= _max_asid) &&
217 (generation == _global_asid_generation));
// Query whether a full ASID flush is pending on this CPU.
222 Svm::flush_all_asids()
223 { return _flush_all_asids; }
// Set/clear the pending-flush flag.
227 Svm::flush_all_asids(bool val)
228 { _flush_all_asids = val; }
// Current ASID generation (bumped when the ASID space rolls over).
232 Svm::global_asid_generation()
233 { return _global_asid_generation; }
// Interior of the ASID allocator (its signature is not visible in this
// excerpt). Must run with the CPU lock held; on rollover, bump the
// generation and demand a full ASID flush so stale TLB entries cannot be
// reused under a recycled ASID.
239 assert(cpu_lock.test());
240 _flush_all_asids = false;
241 if (_next_asid > _max_asid)
243 _global_asid_generation++;
245 // FIXME: must not crash on an overrun
// Unsigned32 counter; the assert guards the (theoretical) wraparound case.
246 assert (_global_asid_generation < ~0U);
247 _flush_all_asids = true;