update
diff --git a/kernel/fiasco/src/kern/kmem_slab_simple.cpp b/kernel/fiasco/src/kern/kmem_slab_simple.cpp
index e5d30ffd0009e4998a4e42207d6f5198c3e6f22b..c87ac3372238099ad1485eb695b02ebd4c8b5ba9 100644
--- a/kernel/fiasco/src/kern/kmem_slab_simple.cpp
+++ b/kernel/fiasco/src/kern/kmem_slab_simple.cpp
@@ -1,6 +1,7 @@
 INTERFACE:
 
 #include <cstddef>             // size_t
+#include "config.h"
 //#include "helping_lock.h"    // Helping_lock
 #include "lock_guard.h"
 #include "spin_lock.h"
@@ -13,14 +14,20 @@ class Kmem_slab_simple : public slab_cache_anon
 
   // DATA
   //typedef Helping_lock Lock;
-  typedef Spin_lock Lock;
-  Lock _lock;
   Kmem_slab_simple* _reap_next;
 
   // STATIC DATA
   static Kmem_slab_simple* reap_list;
 };
 
+template< typename T >
+class Kmem_slab_t : public Kmem_slab_simple
+{
+public:
+  explicit Kmem_slab_t(char const *name)
+  : Kmem_slab_simple(sizeof(T), __alignof(T), name) {}
+};
+
 IMPLEMENTATION:
 
 Kmem_slab_simple* Kmem_slab_simple::reap_list;
@@ -39,40 +46,28 @@ Kmem_slab_simple* Kmem_slab_simple::reap_list;
 #include "panic.h"
 #include "mapped_alloc.h"
 
-// We only support slab size == PAGE_SIZE.
-PUBLIC
-Kmem_slab_simple::Kmem_slab_simple(unsigned elem_size, unsigned alignment,
-                                  char const *name)
-  : slab_cache_anon(Config::PAGE_SIZE, elem_size, alignment, name)
-{
-  _lock.init();
-  enqueue_reap_list();
-}
-
 // Specializations providing their own block_alloc()/block_free() can
 // also request slab sizes larger than one page.
 PROTECTED
-Kmem_slab_simple::Kmem_slab_simple(unsigned long slab_size, 
-                                  unsigned elem_size, 
+Kmem_slab_simple::Kmem_slab_simple(unsigned long slab_size,
+                                  unsigned elem_size,
                                   unsigned alignment,
                                   char const *name)
   : slab_cache_anon(slab_size, elem_size, alignment, name)
 {
-  _lock.init();
   enqueue_reap_list();
 }
 
 // Specializations providing their own block_alloc()/block_free() can
 // also request slab sizes larger than one page.
-PROTECTED
-Kmem_slab_simple::Kmem_slab_simple(unsigned elem_size, 
+PUBLIC
+Kmem_slab_simple::Kmem_slab_simple(unsigned elem_size,
                                   unsigned alignment,
                                   char const *name,
-                                  unsigned long min_size,
-                                  unsigned long max_size)
+                                  unsigned long min_size = Config::PAGE_SIZE,
+                                  unsigned long max_size = Config::PAGE_SIZE * 32)
   : slab_cache_anon(elem_size, alignment, name, min_size, max_size)
 {
-  _lock.init();
   enqueue_reap_list();
 }
 
@@ -87,7 +82,6 @@ Kmem_slab_simple::enqueue_reap_list()
 PUBLIC
 Kmem_slab_simple::~Kmem_slab_simple()
 {
-  Lock_guard<Lock> guard(&_lock);
   destroy();
 }
 
@@ -96,15 +90,13 @@ PUBLIC
 void *
 Kmem_slab_simple::alloc()              // request initialized member from cache
 {
-  Lock_guard<Lock> guard(&_lock);
   return slab_cache_anon::alloc();
 }
 
 PUBLIC
-void 
+void
 Kmem_slab_simple::free(void *cache_entry) // return initialized member to cache
 {
-  Lock_guard<Lock> guard(&_lock);
   slab_cache_anon::free(cache_entry);
 }
 
@@ -112,10 +104,6 @@ PUBLIC
 unsigned long
 Kmem_slab_simple::reap()
 {
-  if (_lock.test())
-    return 0;                  // this cache is locked -- can't get memory now
-
-  Lock_guard<Lock> guard(&_lock);
   return slab_cache_anon::reap();
 }
 
@@ -125,59 +113,15 @@ Kmem_slab_simple::reap()
 virtual void *
 Kmem_slab_simple::block_alloc(unsigned long size, unsigned long)
 {
-  // size must be exactly PAGE_SIZE
-  assert(size == Config::PAGE_SIZE);
+  assert (size >= Config::PAGE_SIZE && !(size & (size - 1)));
   (void)size;
-
-  return Mapped_allocator::allocator()->alloc(Config::PAGE_SHIFT);
-}
-
-virtual void 
-Kmem_slab_simple::block_free(void *block, unsigned long)
-{
-  Mapped_allocator::allocator()->free(Config::PAGE_SHIFT,block);
-}
-
-// memory management
-
-static void *slab_mem = 0;
-
-PUBLIC
-void *
-Kmem_slab_simple::operator new(size_t size)
-{
-//#warning do we really need dynamic allocation of slab allocators?
-  assert(size<=sizeof(Kmem_slab_simple));
-  (void)size; // prevent gcc warning
-  if(!slab_mem)
-    {
-      slab_mem = Mapped_allocator::allocator()->alloc(Config::PAGE_SHIFT);
-      if(!slab_mem)
-       panic("Out of memory (new Kmem_slab_simple)");
-
-      char* s;
-      for( s = (char*)slab_mem; 
-         s < ((char*)slab_mem) + Config::PAGE_SIZE - sizeof(Kmem_slab_simple); 
-         s+=sizeof(Kmem_slab_simple) ) 
-       {
-         *((void**)s) = s+sizeof(Kmem_slab_simple);
-       }
-
-      *((void**)s) = 0;
-    }
-
-  void *sl = slab_mem;
-  slab_mem = *((void**)slab_mem);
-  return sl;
-  
+  return Mapped_allocator::allocator()->unaligned_alloc(size);
 }
 
-PUBLIC
-void 
-Kmem_slab_simple::operator delete(void *block) 
+virtual void
+Kmem_slab_simple::block_free(void *block, unsigned long size)
 {
-  *((void**)block) = slab_mem;
-  slab_mem = block;
+  Mapped_allocator::allocator()->unaligned_free(size, block);
 }
 
 // 
@@ -205,5 +149,4 @@ Kmem_slab_simple::reap_all (bool desperate)
   return freed;
 }
 
-static Mapped_alloc_reaper 
-  kmem_slab_simple_reaper (Kmem_slab_simple::reap_all);
+static Mapped_alloc_reaper kmem_slab_simple_reaper(Kmem_slab_simple::reap_all);
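
For context, a minimal usage sketch of the Kmem_slab_t<T> wrapper added in the first hunk (not part of the patch; "Foo" and the cache name are made-up examples). The wrapper simply forwards sizeof(T) and __alignof(T) to the now-public Kmem_slab_simple constructor, and objects are taken from and returned to the cache through the public alloc()/free() members shown above.

    // Hypothetical example, not from the patch: a slab cache for Foo objects.
    struct Foo { int x; };

    static Kmem_slab_t<Foo> foo_cache("Foo_cache");   // elem_size = sizeof(Foo), alignment = __alignof(Foo)

    void *slot = foo_cache.alloc();    // one sizeof(Foo)-sized, suitably aligned slot
    // ... construct and use a Foo in 'slot' ...
    foo_cache.free(slot);              // hand the slot back to the cache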