7 //-----------------------------------------------------------------------------
10 #include "per_cpu_data.h"
// Per-CPU instance of the SVM state object (defined in the
// IMPLEMENTATION section via DEFINE_PER_CPU below).
17 static Per_cpu<Svm> cpus;
// ASID (Address Space ID) allocator state: the next ASID to hand out
// and the current generation counter.  When the ASID space wraps, the
// generation is bumped and _flush_all_asids requests a full flush so
// stale tagged-TLB entries cannot be reused (see the allocation code
// at the end of this file).
31 Unsigned32 _next_asid;
32 Unsigned32 _global_asid_generation;
34 bool _flush_all_asids;
// Physical base addresses of the I/O- and MSR-permission bitmaps;
// these are the values programmed into the VMCB control area.
37 Unsigned64 _iopm_base_pa;
38 Unsigned64 _msrpm_base_pa;
// Physical address of the kernel's own VMCB (VMRUN takes a physical
// address).
40 Address _kernel_vmcb_pa;
43 //-----------------------------------------------------------------------------
44 INTERFACE [svm && ia32]:
// Number of machine words reserved for saving guest general-purpose
// registers on 32-bit ia32.
49 enum { Gpregs_words = 10 };
52 //-----------------------------------------------------------------------------
53 INTERFACE [svm && amd64]:
// Number of machine words reserved for saving guest general-purpose
// registers on amd64 (more and wider registers than ia32).
58 enum { Gpregs_words = 18 };
61 // -----------------------------------------------------------------------
// Definition of the per-CPU Svm objects declared in the INTERFACE
// section.  NOTE(review): the 'true' argument presumably requests
// construction of the object on every CPU — confirm against Per_cpu.
70 Per_cpu<Svm> DEFINE_PER_CPU Svm::cpus(true);
// Per-CPU SVM initialization: enables SVM in EFER, allocates and
// programs the I/O- and MSR-permission bitmaps, the kernel VMCB and
// the host state-save area for CPU 'cpu'.
73 Svm::Svm(unsigned cpu)
75 Cpu &c = Cpu::cpus.cpu(cpu);
// Reset ASID bookkeeping; all ASIDs are stale until reallocated.
78 _global_asid_generation = 0;
80 _flush_all_asids = true;
86 Unsigned64 efer, vmcr;
// VM_CR.SVMDIS (bit 4) set means the BIOS disabled/locked SVM.
88 vmcr = c.rdmsr(MSR_VM_CR);
89 if (vmcr & (1 << 4)) // VM_CR.SVMDIS
91 printf("SVM supported but locked.\n");
95 printf("Enabling SVM support\n");
// Read-modify-write of EFER to enable SVM.  NOTE(review): the
// EFER.SVME set between read and write-back is in elided lines of
// this excerpt — confirm it is present.
97 efer = c.rdmsr(MSR_EFER);
99 c.wrmsr(efer, MSR_EFER);
// CPUID leaf 0x8000000a reports SVM revision/features; EBX holds the
// number of supported ASIDs (NASID).
101 Unsigned32 eax, ebx, ecx, edx;
102 c.cpuid (0x8000000a, &eax, &ebx, &ecx, &edx);
105 printf("Nested Paging supported\n");
108 printf("NASID: 0x%x\n", ebx);
110 assert(_max_asid > 0);
111 // we internally use Unsigned8 for asids to save per cpu data
112 // your machine supports more asids
113 assert(_max_asid < (1<<8));
119 Msr_pm_size = 0x2000,
120 State_save_area_size = 0x1000,
123 /* 16kB IO permission map and Vmcb (16kB are good for the buddy allocator)*/
124 check(_iopm = Mapped_allocator::allocator()->unaligned_alloc(Io_pm_size + Vmcb_size));
125 _iopm_base_pa = Kmem::virt_to_phys(_iopm);
// The VMCB is carved out of the same allocation, right after the IOPM.
126 _kernel_vmcb = (Vmcb*)((char*)_iopm + Io_pm_size);
127 _kernel_vmcb_pa = Kmem::virt_to_phys(_kernel_vmcb);
// All bits set in the IOPM => intercept accesses to every I/O port.
130 /* disable all ports */
131 memset(_iopm, ~0, Io_pm_size);
134 memset(_kernel_vmcb, 0, Vmcb_size);
// All bits set in the MSRPM => intercept all MSR accesses by default;
// individual MSRs are opened up via set_msr_perm() below.
136 /* 8kB MSR permission map */
137 check(_msrpm = Mapped_allocator::allocator()->unaligned_alloc(Msr_pm_size));
138 _msrpm_base_pa = Kmem::virt_to_phys(_msrpm);
139 memset(_msrpm, ~0, Msr_pm_size);
141 // allow the sysenter MSRs for the guests
142 set_msr_perm(MSR_SYSENTER_CS, Msr_rw);
143 set_msr_perm(MSR_SYSENTER_EIP, Msr_rw);
144 set_msr_perm(MSR_SYSENTER_ESP, Msr_rw);
// The CPU spills host state into this area on VMRUN; its physical
// address is announced via the VM_HSAVE_PA MSR.
146 /* 4kB Host state-save area */
147 check(_vm_hsave_area = Mapped_allocator::allocator()->unaligned_alloc(State_save_area_size));
148 Unsigned64 vm_hsave_pa = Kmem::virt_to_phys(_vm_hsave_area);
150 c.wrmsr(vm_hsave_pa, MSR_VM_HSAVE_PA);
// Sets the guest access permissions for one MSR in the 8kB MSR
// permission map ('perms' selects read/write intercept behavior).
155 Svm::set_msr_perm(Unsigned32 msr, Msr_perms perms)
// The MSRPM covers three architected MSR ranges; each maps to its own
// region of the bitmap (offset computations are in elided lines).
160 else if (0xc0000000 <= msr && msr <= 0xc0001fff)
162 else if (0xc0010000 <= msr && msr <= 0xc0011fff)
// MSRs outside the covered ranges cannot be given to a guest.
166 WARN("Illegal MSR %x\n", msr);
// Two permission bits per MSR (four MSRs per byte); mask out the old
// two-bit field and insert the new permissions.
173 unsigned char *pm = (unsigned char *)_msrpm;
175 unsigned shift = (msr & 3) * 2;
176 pm[offs] = (pm[offs] & ~(3 << shift)) | ((unsigned char)perms << shift);
// Physical address of the I/O permission map (VMCB.iopm_base_pa).
182 { return _iopm_base_pa; }
// Physical address of the MSR permission map (VMCB.msrpm_base_pa).
187 { return _msrpm_base_pa; }
// Virtual-address pointer to the kernel's own VMCB.
192 { return _kernel_vmcb; }
196 Svm::kernel_vmcb_pa()
// Physical address of the kernel VMCB, as required by VMRUN.
197 { return _kernel_vmcb_pa; }
// Whether SVM was enabled on this CPU.  NOTE(review): _svm_enabled is
// presumably set during construction — its assignment is not visible
// in this excerpt.
202 { return _svm_enabled; }
// Returns true iff the (asid, generation) pair may still be used for
// a VMRUN on this CPU: the ASID must be nonzero (ASID 0 is the host),
// within the hardware-supported range, and allocated in the CURRENT
// ASID generation.
//
// BUGFIX: the generation test was 'generation <= _global_asid_generation',
// which accepts ASIDs from every PAST generation and thereby defeats
// the rollover mechanism (when the ASID space wraps, the generation is
// incremented exactly so that previously handed-out ASIDs become
// invalid and stale tagged-TLB entries are not reused).  Since a
// recorded generation can never exceed the global one, '>=' is
// effectively an equality test against the current generation.
211 Svm::asid_valid (Unsigned32 asid, Unsigned32 generation)
213 return ((asid > 0) &&
214 (asid <= _max_asid) &&
215 (generation >= _global_asid_generation));
// True when all ASIDs must be flushed (set on construction and on
// ASID-generation rollover).
220 Svm::flush_all_asids()
221 { return _flush_all_asids; }
// Set/clear the "flush all ASIDs" flag (cleared once the flush has
// been performed).
225 Svm::flush_all_asids(bool val)
226 { _flush_all_asids = val; }
// Current ASID generation; bumped whenever the ASID space wraps.
230 Svm::global_asid_generation()
231 { return _global_asid_generation; }
// ASID allocation (fragment; the entry signature and the tail are
// outside this excerpt).  Must run with the CPU lock held, since it
// mutates per-CPU allocator state.
237 assert (cpu_lock.test());
238 _flush_all_asids = false;
// ASID space exhausted: start a new generation, which invalidates all
// previously handed-out ASIDs, and request a full flush on this CPU.
239 if (_next_asid > _max_asid) {
240 _global_asid_generation++;
// Guard against the generation counter itself wrapping around.
242 assert (_global_asid_generation < ~0U);
243 _flush_all_asids = true;