7 //-----------------------------------------------------------------------------
10 #include "per_cpu_data.h"
// NOTE(review): member declarations of class Svm; the enclosing
// INTERFACE/class header is not fully visible in this excerpt.
// One Svm instance per CPU (definition via DEFINE_PER_CPU below).
17 static Per_cpu<Svm> cpus;
// ASID (address-space ID) allocation state for SVM's tagged TLB:
// next ASID to hand out and the current allocation generation.
31 Unsigned32 _next_asid;
32 Unsigned32 _global_asid_generation;
// Set when all ASIDs must be flushed before reuse (see generation
// rollover in the allocation code below).
34 bool _flush_all_asids;
// Physical addresses of the I/O and MSR permission bitmaps that are
// handed to the hardware via the VMCB.
37 Unsigned64 _iopm_base_pa;
38 Unsigned64 _msrpm_base_pa;
// Physical address of the kernel's VMCB (virtual pointer: _kernel_vmcb).
40 Address _kernel_vmcb_pa;
43 //-----------------------------------------------------------------------------
44 INTERFACE [svm && ia32]:
// Words reserved for guest general-purpose registers on 32-bit x86.
49 enum { Gpregs_words = 10 };
52 //-----------------------------------------------------------------------------
53 INTERFACE [svm && amd64]:
// Words reserved for guest general-purpose registers on 64-bit x86
// (more GPRs than the ia32 case above).
58 enum { Gpregs_words = 18 };
61 // -----------------------------------------------------------------------
// Definition of the per-CPU Svm instance; the 'true' argument is the
// Per_cpu constructor flag used by this codebase (semantics declared
// in per_cpu_data.h, not visible here).
70 DEFINE_PER_CPU Per_cpu<Svm> Svm::cpus(true);
// Per-CPU SVM initialization for CPU `cpu`:
//  - checks VM_CR.SVMDIS and enables SVM via EFER,
//  - queries CPUID 0x8000000a (SVM feature leaf; EBX = number of ASIDs),
//  - allocates the I/O permission map + kernel VMCB, the MSR permission
//    map, and the host state-save area, and programs MSR_VM_HSAVE_PA.
// NOTE(review): this excerpt omits several original source lines; the
// comments below only describe what is visible.
73 Svm::Svm(unsigned cpu)
75 Cpu &c = Cpu::cpus.cpu(cpu);
// Start in generation 0 with a full flush pending, so stale hardware
// TLB entries can never match a freshly allocated ASID.
78 _global_asid_generation = 0;
80 _flush_all_asids = true;
86 Unsigned64 efer, vmcr;
88 vmcr = c.rdmsr(MSR_VM_CR);
// SVMDIS set means the BIOS disabled (and possibly locked) SVM.
89 if (vmcr & (1 << 4)) // VM_CR.SVMDIS
91 printf("SVM supported but locked.\n");
95 printf("Enabling SVM support\n");
// wrmsr argument order in this codebase is (value, msr) — see the
// MSR_VM_HSAVE_PA write at the end of this constructor.
97 efer = c.rdmsr(MSR_EFER);
99 c.wrmsr(efer, MSR_EFER);
// CPUID leaf 0x8000000a: SVM revision/features; EBX = NASID count.
101 Unsigned32 eax, ebx, ecx, edx;
102 c.cpuid (0x8000000a, &eax, &ebx, &ecx, &edx);
105 printf("Nested Paging supported\n");
108 printf("NASID: 0x%x\n", ebx);
// At least one usable ASID is required for guest execution.
110 assert(_max_asid > 0);
116 Msr_pm_size = 0x2000,
117 State_save_area_size = 0x1000,
120 /* 16kB IO permission map and Vmcb (16kB are good for the buddy allocator)*/
121 check(_iopm = Kmem_alloc::allocator()->unaligned_alloc(Io_pm_size + Vmcb_size));
122 _iopm_base_pa = Kmem::virt_to_phys(_iopm);
// The VMCB is carved out of the same allocation, right after the I/O map.
123 _kernel_vmcb = (Vmcb*)((char*)_iopm + Io_pm_size);
124 _kernel_vmcb_pa = Kmem::virt_to_phys(_kernel_vmcb);
/* disable all ports */
128 memset(_iopm, ~0, Io_pm_size);
131 memset(_kernel_vmcb, 0, Vmcb_size);
133 /* 8kB MSR permission map */
134 check(_msrpm = Kmem_alloc::allocator()->unaligned_alloc(Msr_pm_size));
135 _msrpm_base_pa = Kmem::virt_to_phys(_msrpm);
// All bits set: every MSR access intercepted by default; selected MSRs
// are opened up below.
136 memset(_msrpm, ~0, Msr_pm_size);
138 // allow the sysenter MSRs for the guests
139 set_msr_perm(MSR_SYSENTER_CS, Msr_rw);
140 set_msr_perm(MSR_SYSENTER_EIP, Msr_rw);
141 set_msr_perm(MSR_SYSENTER_ESP, Msr_rw);
/* 4kB Host state-save area */
144 check(_vm_hsave_area = Kmem_alloc::allocator()->unaligned_alloc(State_save_area_size));
145 Unsigned64 vm_hsave_pa = Kmem::virt_to_phys(_vm_hsave_area);
// Tell the CPU where to save host state across VMRUN.
147 c.wrmsr(vm_hsave_pa, MSR_VM_HSAVE_PA);
// Set the intercept/permission bits for `msr` in the MSR permission map.
// The map covers three MSR ranges (the two high ranges are visible
// below; presumably the low range 0x0..0x1fff is handled on the lines
// omitted from this excerpt). Each MSR uses 2 bits, packed 4 MSRs per
// byte — see the shift computation below.
152 Svm::set_msr_perm(Unsigned32 msr, Msr_perms perms)
157 else if (0xc0000000 <= msr && msr <= 0xc0001fff)
159 else if (0xc0010000 <= msr && msr <= 0xc0011fff)
// MSR outside all mapped ranges: warn and (presumably) bail out.
163 WARN("Illegal MSR %x\n", msr);
170 unsigned char *pm = (unsigned char *)_msrpm;
// 2 permission bits per MSR within each byte of the map.
172 unsigned shift = (msr & 3) * 2;
173 pm[offs] = (pm[offs] & ~(3 << shift)) | ((unsigned char)perms << shift);
// Accessors; several function heads fall on lines omitted from this
// excerpt, only the return statements are visible.
// Physical address of the I/O permission map (for the VMCB).
179 { return _iopm_base_pa; }
// Physical address of the MSR permission map (for the VMCB).
184 { return _msrpm_base_pa; }
// Virtual pointer to the kernel VMCB.
189 { return _kernel_vmcb; }
193 Svm::kernel_vmcb_pa()
// Physical address of the kernel VMCB (for VMRUN).
194 { return _kernel_vmcb_pa; }
// Whether SVM was enabled — presumably set in the constructor on the
// lines not visible here; TODO confirm.
199 { return _svm_enabled; }
// Check whether an (asid, generation) pair is still usable: the ASID
// must be in (0, _max_asid] and its generation must not be newer than
// the current global generation.
// NOTE(review): the test is `<=`, not `==` — an ASID from an *older*
// generation still passes here, so correctness presumably relies on
// the _flush_all_asids handling elsewhere; verify against callers.
208 Svm::asid_valid (Unsigned32 asid, Unsigned32 generation)
210 return ((asid > 0) &&
211 (asid <= _max_asid) &&
212 (generation <= _global_asid_generation));
// Query the pending "flush all ASIDs" flag.
217 Svm::flush_all_asids()
218 { return _flush_all_asids; }
// Setter overload for the same flag.
222 Svm::flush_all_asids(bool val)
223 { _flush_all_asids = val; }
// Current ASID allocation generation counter.
227 Svm::global_asid_generation()
228 { return _global_asid_generation; }
// NOTE(review): the function head is on lines omitted from this
// excerpt. The visible body allocates a fresh ASID: when the ASID pool
// is exhausted it advances the global generation and requests a full
// ASID flush. Must run with the CPU lock held (asserted below).
234 assert(cpu_lock.test());
235 _flush_all_asids = false;
236 if (_next_asid > _max_asid)
// Pool exhausted: start a new generation and flush everything.
238 _global_asid_generation++;
// Guard against the (practically unreachable) 32-bit generation wrap.
240 assert (_global_asid_generation < ~0U);
241 _flush_all_asids = true;