Update
diff --git a/kernel/fiasco/src/kern/ia32/svm.cpp b/kernel/fiasco/src/kern/ia32/svm.cpp
index 65ae3ed0247a7ffa300027620ef1ea7fbf96dae1..a554d02417f7102e5fe7cf849873bfe39b3a3785 100644
--- a/kernel/fiasco/src/kern/ia32/svm.cpp
+++ b/kernel/fiasco/src/kern/ia32/svm.cpp
@@ -10,8 +10,9 @@ INTERFACE[svm]:
 #include "per_cpu_data.h"
 #include "virt.h"
 #include "cpu_lock.h"
+#include "pm.h"
 
-EXTENSION class Svm
+EXTENSION class Svm : public Pm_object
 {
 public:
   static Per_cpu<Svm> cpus;
@@ -25,15 +26,18 @@ public:
   };
 
 private:
-  void *_vm_hsave_area;
-  void *_iopm;
-  void *_msrpm;
+  Vmcb const *_last_user_vmcb;
   Unsigned32 _next_asid;
   Unsigned32 _global_asid_generation;
-  Unsigned32 _max_asid;
   bool _flush_all_asids;
+
+  /* read-mostly members below */
+  Unsigned32 _max_asid;
   bool _svm_enabled;
   bool _has_npt;
+  void *_vm_hsave_area;
+  void *_iopm;
+  void *_msrpm;
   Unsigned64 _iopm_base_pa;
   Unsigned64 _msrpm_base_pa;
   Vmcb *_kernel_vmcb;
@@ -67,12 +71,48 @@ IMPLEMENTATION[svm]:
 #include "warn.h"
 #include <cstring>
 
-DEFINE_PER_CPU Per_cpu<Svm> Svm::cpus(true);
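+// Deferred (late) per-CPU construction: the constructor allocates kernel
+// memory, so it cannot run during early per-CPU init.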
+DEFINE_PER_CPU_LATE Per_cpu<Svm> Svm::cpus(Per_cpu_data::Cpu_num);
 
 PUBLIC
-Svm::Svm(unsigned cpu)
+void
+Svm::pm_on_suspend(Cpu_number)
+{
+  // FIXME: handle cached VMCB state across suspend, iff VMCB caching is enabled
+}
+
+PUBLIC
+void
+Svm::pm_on_resume(Cpu_number)
+{
+  Unsigned64 efer = Cpu::rdmsr(MSR_EFER);
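+  // The CPU lost its MSR state during suspend; re-enable SVM via EFER.SVME (bit 12).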
+  efer |= 1 << 12;
+  Cpu::wrmsr(efer, MSR_EFER);
+  Unsigned64 vm_hsave_pa = Kmem::virt_to_phys(_vm_hsave_area);
+  Cpu::wrmsr(vm_hsave_pa, MSR_VM_HSAVE_PA);
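+  // Whatever VMCB state the CPU had cached is gone after resume;
+  // force a full reload on the next VMRUN.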
+  _last_user_vmcb = 0;
+}
+
+PUBLIC static inline NEEDS["cpu.h"]
+bool
+Svm::cpu_svm_available(Cpu_number cpu)
 {
   Cpu &c = Cpu::cpus.cpu(cpu);
+
+  if (!c.online() || !c.svm())
+    return false;
+
+  Unsigned64 vmcr;
+  vmcr = c.rdmsr(MSR_VM_CR);
+  if (vmcr & (1 << 4)) // VM_CR.SVMDIS: SVM disabled (and possibly locked) by the BIOS
+    return false;
+  return true;
+}
+
+PUBLIC
+Svm::Svm(Cpu_number cpu)
+{
+  Cpu &c = Cpu::cpus.cpu(cpu);
+  _last_user_vmcb = 0;
   _svm_enabled = false;
   _next_asid = 1;
   _global_asid_generation = 0;
@@ -80,20 +120,12 @@ Svm::Svm(unsigned cpu)
   _flush_all_asids = true;
   _has_npt = false;
 
-  if (!c.svm())
+  if (!cpu_svm_available(cpu))
     return;
 
-  Unsigned64 efer, vmcr;
-
-  vmcr = c.rdmsr(MSR_VM_CR);
-  if (vmcr & (1 << 4)) // VM_CR.SVMDIS
-    {
-      printf("SVM supported but locked.\n");
-      return;
-    }
-
-  printf("Enabling SVM support\n");
+  printf("SVM: enabled\n");
 
+  Unsigned64 efer;
   efer = c.rdmsr(MSR_EFER);
   efer |= 1 << 12;
   c.wrmsr(efer, MSR_EFER);
@@ -102,11 +134,13 @@ Svm::Svm(unsigned cpu)
   c.cpuid (0x8000000a, &eax, &ebx, &ecx, &edx);
   if (edx & 1)
     {
-      printf("Nested Paging supported\n");
+      printf("SVM: nested paging supported\n");
       _has_npt = true;
     }
-  printf("NASID: 0x%x\n", ebx);
+  printf("SVM: NASID: %u\n", ebx);
   _max_asid = ebx - 1;
+
+  // FIXME: MUST NOT PANIC ON CPU HOTPLUG
   assert(_max_asid > 0);
 
   enum
@@ -118,6 +152,7 @@ Svm::Svm(unsigned cpu)
   };
 
   /* 16kB IO permission map and Vmcb (16kB is a good size for the buddy allocator) */
+  // FIXME: MUST NOT PANIC ON CPU HOTPLUG
   check(_iopm = Kmem_alloc::allocator()->unaligned_alloc(Io_pm_size + Vmcb_size));
   _iopm_base_pa = Kmem::virt_to_phys(_iopm);
   _kernel_vmcb = (Vmcb*)((char*)_iopm + Io_pm_size);
@@ -131,6 +166,7 @@ Svm::Svm(unsigned cpu)
   memset(_kernel_vmcb, 0, Vmcb_size);
 
   /* 8kB MSR permission map */
+  // FIXME: MUST NOT PANIC ON CPU HOTPLUG
   check(_msrpm = Kmem_alloc::allocator()->unaligned_alloc(Msr_pm_size));
   _msrpm_base_pa = Kmem::virt_to_phys(_msrpm);
   memset(_msrpm, ~0, Msr_pm_size);
@@ -139,12 +175,20 @@ Svm::Svm(unsigned cpu)
   set_msr_perm(MSR_SYSENTER_CS, Msr_rw);
   set_msr_perm(MSR_SYSENTER_EIP, Msr_rw);
   set_msr_perm(MSR_SYSENTER_ESP, Msr_rw);
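+  // Give the guest untrapped access to the segment-base and 64-bit syscall MSRs.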
+  set_msr_perm(MSR_GS_BASE, Msr_rw);
+  set_msr_perm(MSR_FS_BASE, Msr_rw);
+  set_msr_perm(MSR_KERNEL_GS_BASE, Msr_rw);
+  set_msr_perm(MSR_CSTAR, Msr_rw);
+  set_msr_perm(MSR_LSTAR, Msr_rw);
+  set_msr_perm(MSR_SFMASK, Msr_rw);
 
   /* 4kB host state-save area */
+  // FIXME: MUST NOT PANIC ON CPU HOTPLUG
   check(_vm_hsave_area = Kmem_alloc::allocator()->unaligned_alloc(State_save_area_size));
   Unsigned64 vm_hsave_pa = Kmem::virt_to_phys(_vm_hsave_area);
 
   c.wrmsr(vm_hsave_pa, MSR_VM_HSAVE_PA);
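+  // Hook into PM so pm_on_suspend()/pm_on_resume() run around CPU sleep states.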
+  register_pm(cpu);
 }
 
 PUBLIC
@@ -183,10 +227,24 @@ Unsigned64
 Svm::msrpm_base_pa()
 { return _msrpm_base_pa; }
 
-PUBLIC
+/**
+ * \pre user_vmcb must be a unique address across all address spaces
+ *      (e.g., a kernel KU-mem address)
+ */
+PUBLIC inline
 Vmcb *
-Svm::kernel_vmcb()
-{ return _kernel_vmcb; }
+Svm::kernel_vmcb(Vmcb const *user_vmcb)
+{
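+  // If this CPU last ran a different guest VMCB, nothing cached in
+  // _kernel_vmcb may be reused: clear all clean bits so the CPU reloads
+  // every field. Otherwise the user-supplied clean bits are passed through.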
+  if (user_vmcb != _last_user_vmcb)
+    {
+      _kernel_vmcb->control_area.clean_bits.raw = 0;
+      _last_user_vmcb = user_vmcb;
+    }
+  else
+    _kernel_vmcb->control_area.clean_bits = access_once(&user_vmcb->control_area.clean_bits);
+
+  return _kernel_vmcb;
+}
 
 PUBLIC
 Address
@@ -205,38 +263,39 @@ Svm::has_npt()
 
 PUBLIC
 bool
-Svm::asid_valid (Unsigned32 asid, Unsigned32 generation)
+Svm::asid_valid(Unsigned32 asid, Unsigned32 generation)
 {
   return ((asid > 0) &&
           (asid <= _max_asid) &&
           (generation <= _global_asid_generation));
 }
 
-PUBLIC
-bool
-Svm::flush_all_asids()
-{ return _flush_all_asids; }
-
-PUBLIC
+PUBLIC inline
 void
-Svm::flush_all_asids(bool val)
-{ _flush_all_asids = val; }
+Svm::flush_asids_if_needed()
+{
+  if (EXPECT_TRUE(!_flush_all_asids))
+    return;
 
-PUBLIC
+  _flush_all_asids = false;
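+  // TLB_CONTROL = 1: flush the entire TLB on the next VMRUN.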
+  _kernel_vmcb->control_area.tlb_ctl |= 1;
+}
+
+PUBLIC inline
 Unsigned32
-Svm::global_asid_generation()
+Svm::global_asid_generation() const
 { return _global_asid_generation; }
 
 PUBLIC
 Unsigned32
-Svm::next_asid ()
+Svm::next_asid()
 {
-  assert(cpu_lock.test());
-  _flush_all_asids = false;
+  assert (cpu_lock.test());
   if (_next_asid > _max_asid)
     {
       _global_asid_generation++;
       _next_asid = 1;
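+      // The generation bump invalidates every ASID issued so far;
+      // _flush_all_asids arranges a full TLB flush on the next VMRUN.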
+      // FIXME: must not crash on an overrun
       assert (_global_asid_generation < ~0U);
       _flush_all_asids = true;
     }