INTERFACE:

#include "spin_lock.h"

// The anonymous slab allocator. You can specialize this allocator by
// providing your own initialization functions and your own low-level
// allocation functions.

class slab_cache_anon
{
protected:
  friend class slab;

  // Low-level allocator functions:

  // Allocate/free a block. "size" is always a multiple of PAGE_SIZE.
  virtual void *block_alloc(unsigned long size, unsigned long alignment) = 0;
  virtual void block_free(void *block, unsigned long size) = 0;

  typedef Spin_lock Lock;
  Lock lock;

private:
  slab_cache_anon();                        // default constructor is undefined
  slab_cache_anon(const slab_cache_anon&);  // copy constructor is undefined

  //
  // data declaration follows
  //
  // The slabs of this cache are held in a partially-sorted
  // doubly-linked list. First come the fully-active slabs (all
  // elements in use), then the partially active slabs, then the empty
  // slabs -- for example, [full, full, partial, partial, empty]:
  // _first_slab points at the head of the list, _first_available_slab
  // at the first slab with free elements (0 if all slabs are full),
  // and _last_slab at the tail.
  slab *_first_slab, *_first_available_slab, *_last_slab;
  unsigned long _slab_size;
  unsigned _elem_size, _latest_offset, _alignment;
  char const *_name;
};				// end declaration of class slab_cache_anon

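// A minimal sketch of the specialization described above (illustrative
// only: "My_page_backend" and PAGE_SIZE stand in for whatever low-level
// page allocator the surrounding kernel provides; they are not defined
// in this file). A derived cache supplies the two block functions and
// calls destroy() from its destructor:
//
//   class my_cache : public slab_cache_anon
//   {
//   public:
//     my_cache(unsigned elem_size, unsigned alignment, char const *name)
//       : slab_cache_anon(elem_size, alignment, name, PAGE_SIZE, PAGE_SIZE)
//     {}
//     ~my_cache() { destroy(); }
//
//   protected:
//     virtual void *block_alloc(unsigned long size, unsigned long align)
//     { return My_page_backend::alloc(size, align); }
//     virtual void block_free(void *block, unsigned long size)
//     { My_page_backend::free(block, size); }
//   };
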
IMPLEMENTATION:

#include <cassert>		// assert()
#include <cstdio>		// printf(), for debug_dump()

#include <lock_guard.h>

#ifndef offsetof		// should be defined in stddef.h, but isn't
#define offsetof(TYPE, MEMBER) (((size_t) &((TYPE *)10)->MEMBER) - 10)
#endif

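// The fallback macro pretends an object lives at address 10, takes the
// member's address inside it, and subtracts 10 again. Using 10 rather
// than the classic 0 avoids compilers that special-case dereferences
// of a literal null pointer.
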
//
// class slab
//

class slab			// a slab of the cache
{
  slab();
  slab(const slab&);	// default and copy constructors remain undefined

  struct slab_entry
  {
    slab_entry *_next_free;
    char _entry[0];		// element data follows the free-list link
  };

  struct slab_data
  {
    slab_cache_anon *_cache;
    slab_entry *_first_free;
    slab *_next, *_prev;
    unsigned short _in_use, _free;
  };

  // slabs for CACHE_ENTRY should contain at least min_cache_items
  // cache entries
  static const unsigned min_cache_items = 4;

  //
  // data declaration follows
  //
  slab_data _data;
};				// end declaration of class slab

// default deallocator must not be called -- must use explicit destruction
// (destructor call plus block_free()) instead
PUBLIC
void
slab::operator delete(void* /*block*/)
{
  assert (!"slab::operator delete called");
}

PUBLIC
slab::slab(slab_cache_anon *cache)
{
  _data._cache = cache;
  _data._in_use = 0;
  _data._next = _data._prev = 0;

  // Compute offset of first slab_entry in slab, not taking into
  // account the colorization offset. "slab_entry._entry[]" needs to
  // be "cache->_alignment"-aligned
  unsigned long offset_first_elem =
    ((sizeof(slab_data) + sizeof (slab_entry) + cache->_alignment - 1)
     & ~(cache->_alignment - 1))
    - sizeof (slab_entry);

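  // Worked example with made-up sizes: assuming cache->_alignment == 16,
  // sizeof(slab_data) == 24 and sizeof(slab_entry) == 8, the sum 24 + 8
  // rounds up to 32; subtracting 8 gives offset_first_elem == 24, so the
  // first slab_entry starts at offset 24 and its _entry[] payload at the
  // aligned offset 32. It is the payload, not the _next_free link, that
  // must be aligned.
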
  // Compute size of a slab entry, including alignment padding at end
  // of entry
  unsigned entry_size =
    (sizeof(slab_entry) + cache->_elem_size + cache->_alignment - 1)
    & ~(cache->_alignment - 1);

  // Compute number of elements fitting into a slab
  unsigned elem_num =
    (cache->_slab_size - offset_first_elem) / entry_size;

  // Compute pointer to first data element, now taking into account
  // the latest colorization offset
  char *data =
    reinterpret_cast<char*>(this) + offset_first_elem + cache->_latest_offset;

  // Update the colorization offset
  cache->_latest_offset += cache->_alignment;
  if (offset_first_elem + cache->_latest_offset + entry_size * elem_num
      > cache->_slab_size)
    {
      cache->_latest_offset = 0;
    }

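  // Rationale: this "cache coloring" shifts each new slab's element
  // array by one more alignment unit than the previous slab's, so that
  // the same-numbered elements of different slabs fall into different
  // CPU cache lines/sets; the offset wraps back to 0 as soon as the
  // shifted elements would no longer fit into the slab.
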
  // Initialize the cache elements
  slab_entry *e = 0, *e_prev = 0;

  for (unsigned i = 0; i < elem_num; i++)
    {
      e = reinterpret_cast<slab_entry *>(data);

      e->_next_free = e_prev;
      cache->elem_ctor(& e->_entry[0]);

      e_prev = e;

      data += entry_size;	// entry_size already includes the padding
    }

  _data._first_free = e;
  _data._free = elem_num;
}

PUBLIC
void *
slab::alloc()
{
  slab_entry *e = _data._first_free;

  if (! e)
    return 0;

  _data._first_free = e->_next_free;
  _data._in_use++;
  _data._free--;

  return & e->_entry[0];
}

PUBLIC
void
slab::free(void *entry)
{
  slab_entry *e = reinterpret_cast<slab_entry *>
    (reinterpret_cast<char*>(entry) - offsetof(slab_entry, _entry));

  e->_next_free = _data._first_free;
  _data._first_free = e;

  assert(_data._in_use);
  _data._in_use--;
  _data._free++;
}

PUBLIC inline
bool
slab::is_empty()
{
  return _data._in_use == 0;
}

PUBLIC inline
bool
slab::is_full()
{
  return _data._free == 0;
}

PUBLIC inline
unsigned
slab::in_use()
{
  return _data._in_use;
}

PUBLIC
void
slab::enqueue(slab *prev)
{
  assert(prev);

  if ((_data._next = prev->_data._next))
    _data._next->_data._prev = this;
  _data._prev = prev;
  _data._prev->_data._next = this;
}

PUBLIC
void
slab::dequeue()
{
  if (_data._prev)
    _data._prev->_data._next = _data._next;
  if (_data._next)
    _data._next->_data._prev = _data._prev;

  _data._prev = _data._next = 0;
}

PUBLIC inline
slab *
slab::prev()
{
  return _data._prev;
}

PUBLIC inline
slab *
slab::next()
{
  return _data._next;
}

PUBLIC
void *
slab::operator new(size_t,
                   slab_cache_anon *cache) throw()
{
  // slabs must be size-aligned so that we can compute their addresses
  // from element addresses
  return cache->block_alloc(cache->_slab_size, cache->_slab_size);
}

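// Consequence of the size-alignment (assuming _slab_size is a power of
// two): for any element e handed out by a slab s,
//
//   reinterpret_cast<unsigned long>(e) & ~(_slab_size - 1)
//     == reinterpret_cast<unsigned long>(s)
//
// slab_cache_anon::free() below relies on exactly this invariant to
// find the owning slab of a returned element.
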
PUBLIC static
unsigned
slab::num_elems(unsigned long slab_size,
                unsigned elem_size,
                unsigned alignment)
{
  // Compute offset of first slab_entry in slab, not taking into
  // account the colorization offset. "slab_entry._entry[]" needs to
  // be "alignment"-aligned
  unsigned long offset_first_elem =
    ((sizeof(slab::slab_data) + sizeof (slab::slab_entry) + alignment - 1)
     & ~(alignment - 1))
    - sizeof (slab::slab_entry);

  // Compute size of a slab entry, including alignment padding at end
  // of entry
  unsigned entry_size =
    (sizeof(slab::slab_entry) + elem_size + alignment - 1)
    & ~(alignment - 1);

  // Compute number of elements fitting into a slab
  return (slab_size - offset_first_elem) / entry_size;
}

PUBLIC static inline
unsigned
slab_cache_anon::num_elems(unsigned long slab_size,
                           unsigned elem_size,
                           unsigned alignment)
{ return slab::num_elems(slab_size, elem_size, alignment); }

PUBLIC
slab_cache_anon::slab_cache_anon(unsigned elem_size,
                                 unsigned alignment,
                                 char const *name,
                                 unsigned long min_size,
                                 unsigned long max_size)
  : _first_slab(0), _first_available_slab(0), _last_slab(0),
    _elem_size(elem_size),
    _latest_offset(0), _alignment(alignment),
    _name (name)
{
  lock.init();

  // Double the slab size until at least 8 elements fit (or max_size
  // is reached).
  for (_slab_size = min_size;
       num_elems(_slab_size, elem_size, alignment) < 8
         && _slab_size < max_size;
       _slab_size <<= 1)
    ;
}

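// Sizing example with made-up numbers: assuming min_size == 4 KB, an
// 8-byte slab_entry header, elem_size == 1024 and alignment == 8, a
// 4-KB slab holds 3 elements and an 8-KB slab 7, so the loop above
// settles on _slab_size == 16 KB (about 15 elements per slab).
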
PUBLIC
slab_cache_anon::slab_cache_anon(unsigned long slab_size,
                                 unsigned elem_size,
                                 unsigned alignment,
                                 char const *name)
  : _first_slab(0), _first_available_slab(0), _last_slab(0),
    _slab_size(slab_size), _elem_size(elem_size),
    _latest_offset(0), _alignment(alignment),
    _name (name)
{
  lock.init();
}

PUBLIC virtual
slab_cache_anon::~slab_cache_anon()
{
  // the derived class should call destroy() before deleting us.
  // assert(_first_slab == 0);
}

PROTECTED inline
void
slab_cache_anon::destroy()	// descendant should call this in destructor
{
  slab *n, *s = _first_slab;

  while (s)
    {
      n = s->next();

      // explicitly call destructor to delete s
      s->~slab();
      block_free(s, _slab_size);

      s = n;
    }

  _first_slab = 0;
}

PUBLIC
virtual void *
slab_cache_anon::alloc()	// request initialized member from cache
{
  Lock_guard<Lock> guard(&lock);

  if (! _first_available_slab)
    {
      slab *s = new (this) slab(this);
      if (! s)
        return 0;

      _first_available_slab = s;

      if (_last_slab)
        {
          assert(_last_slab->is_full());

          _first_available_slab->enqueue(_last_slab);
          _last_slab = _first_available_slab;
        }
      else			// this was the first slab we allocated
        _first_slab = _last_slab = _first_available_slab;
    }

  assert(_first_available_slab
         && ! _first_available_slab->is_full());
  assert(! _first_available_slab->prev()
         || _first_available_slab->prev()->is_full());

  void *ret = _first_available_slab->alloc();
  assert(ret);

  if (_first_available_slab->is_full())
    _first_available_slab = _first_available_slab->next();

  return ret;
}

PUBLIC template< typename Q >
inline
void *
slab_cache_anon::q_alloc(Q *quota)
{
  if (EXPECT_FALSE(!quota->alloc(_elem_size)))
    return 0;

  void *r;
  if (EXPECT_FALSE(!(r = alloc())))
    {
      quota->free(_elem_size);
      return 0;
    }

  return r;
}

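// q_alloc() charges the quota before allocating and rolls the charge
// back if the cache itself fails. A quota object only needs the
// alloc()/free() shape used above; minimal sketch ("Simple_quota" is
// hypothetical, not part of this file):
//
//   struct Simple_quota
//   {
//     unsigned long avail;
//     bool alloc(unsigned long size)
//     { if (size > avail) return false; avail -= size; return true; }
//     void free(unsigned long size) { avail += size; }
//   };
//
//   void *obj = cache->q_alloc(&quota); // 0 if quota or cache is exhausted
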
PUBLIC
virtual void
slab_cache_anon::free(void *cache_entry) // return initialized member to cache
{
  Lock_guard<Lock> guard(&lock);

  slab *s = reinterpret_cast<slab*>
    (reinterpret_cast<unsigned long>(cache_entry) & ~(_slab_size - 1));

  bool was_full = s->is_full();

  s->free(cache_entry);

  if (was_full)
    {
      if (s->next() == 0)	// have no right neighbor?
        {
          assert(! _first_available_slab);
        }
      else if (s->next()->is_full()) // right neighbor full?
        {
          // We requeue to become the first non-full slab in the queue
          // so that all full slabs are at the beginning of the queue.

          if (s == _first_slab)
            _first_slab = s->next();
          // don't care about _first_available_slab, _last_slab --
          // they cannot point to s because we were full and have a
          // full right neighbor

          s->dequeue();

          if (_first_available_slab)
            {
              // _first_available_slab->prev() is defined because we
              // had a right neighbor which is full, that is,
              // _first_available_slab wasn't our right neighbor and
              // now isn't the first slab in the queue
              assert(_first_available_slab->prev()->is_full());
              s->enqueue(_first_available_slab->prev());
            }
          else
            {
              // all slabs were full
              assert(_last_slab->is_full());
              s->enqueue(_last_slab);
              _last_slab = s;
            }
        }

      _first_available_slab = s;
    }
  else if (s->is_empty())
    {
      if (s->next() && (! s->next()->is_empty())) // right neighbor not empty?
        {
          // Move to tail of list
          if (s == _first_slab)
            _first_slab = s->next();
          if (s == _first_available_slab)
            _first_available_slab = s->next();
          // don't care about _last_slab because we know we have a
          // right neighbor

          s->dequeue();

          s->enqueue(_last_slab);
          _last_slab = s;
        }
    }
  else
    {
      // We weren't either full or empty; we already had free
      // elements. This changes nothing in the queue, and there
      // already must have been a _first_available_slab.
    }

  assert(_first_available_slab);
}

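// Requeue example for the was_full case (F = full, P = partial,
// E = empty): freeing an element of the middle slab of
// [F1 F2 F3 P1 E1] turns F2 partial. F2 is dequeued and re-enqueued
// after F3 (the predecessor of the old _first_available_slab P1),
// giving [F1 F3 F2 P1 E1] with _first_available_slab == F2 -- all
// full slabs again precede all slabs with free elements.
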
PUBLIC template< typename Q >
inline
void
slab_cache_anon::q_free(Q *quota, void *obj)
{
  free(obj);
  quota->free(_elem_size);
}

PUBLIC
virtual unsigned long
slab_cache_anon::reap()		// request that cache returns memory to system
{
  Lock_guard<Lock> guard(&lock);

  if (! _first_slab)
    return 0;			// haven't allocated anything yet

  // never delete first slab, even if it is empty
  if (_last_slab == _first_slab
      || (! _last_slab->is_empty()))
    return 0;

  slab *s = _last_slab;

  if (_first_available_slab == s)
    _first_available_slab = 0;

  _last_slab = s->prev();
  s->dequeue();

  // explicitly call destructor to delete s
  s->~slab();
  block_free(s, _slab_size);

  return _slab_size;
}

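// reap() releases at most one (the last, empty) slab per call, so a
// cache is shrunk as far as possible by looping (sketch):
//
//   unsigned long reclaimed = 0, r;
//   while ((r = cache->reap()) != 0)
//     reclaimed += r;
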
//
// Element initialization/destruction
//

PROTECTED virtual
void
slab_cache_anon::elem_ctor(void *)
{}

PROTECTED virtual
void
slab_cache_anon::elem_dtor(void *)
{}

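// A derived cache can override these hooks to keep its elements in a
// constructed state while they sit unallocated in a slab (sketch,
// with a hypothetical element type T):
//
//   virtual void elem_ctor(void *e) { new (e) T; }
//   virtual void elem_dtor(void *e) { static_cast<T*>(e)->~T(); }
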
//
// Debugging stuff
//

PUBLIC
virtual void
slab_cache_anon::debug_dump ()
{
  printf ("%s: %lu-KB slabs (",
          _name, _slab_size / 1024);

  unsigned count, total = 0, total_elems = 0;
  slab* s = _first_slab;

  for (count = 0;
       s && s->is_full();
       s = s->next())
    {
      count++;
      total_elems += s->in_use();
    }

  total += count;

  printf ("%u full, ", count);

  for (count = 0;
       s && ! s->is_empty();
       s = s->next())
    {
      if (s->is_full())
        printf ("\n*** wrongly-enqueued full slab found\n");
      count++;
      total_elems += s->in_use();
    }

  total += count;

  printf ("%u used, ", count);

  for (count = 0;
       s;
       s = s->next())
    {
      if (! s->is_empty())
        printf ("\n*** wrongly-enqueued nonempty slab found\n");
      count++;
      total_elems += s->in_use();
    }

  unsigned total_used = total;	// full and partially-used slabs only
  total += count;

  printf ("%u empty = %u total) = %lu KB,\n"
          "  %u elems (size=%u, align=%u)",
          count, total, total * _slab_size / 1024,
          total_elems, _elem_size, _alignment);

  if (total_elems)
    // overhead over all slabs and, in parentheses, over the non-empty
    // slabs only
    printf (", overhead = %lu B (%lu B) = %lu%% (%lu%%) \n",
            total * _slab_size - total_elems * _elem_size,
            total_used * _slab_size - total_elems * _elem_size,
            100 - total_elems * _elem_size * 100 / (total * _slab_size),
            100 - total_elems * _elem_size * 100 / (total_used * _slab_size));
  else
    printf ("\n");
}