// The anonymous slab allocator.  You can specialize this allocator by
// providing your own initialization functions and your own low-level
// allocation functions.

class slab
{
private:
  slab(const slab&);		// copy constructor remains undefined

  struct slab_entry
  {
    slab_entry *_next_free;
  };

  slab_cache_anon *_cache;
  slab_entry *_first_free;
  slab *_next, *_prev;
  unsigned short _in_use;
};
class slab_cache_anon
{
  // Low-level allocator functions:

  // Allocate/free a block.  "size" is always a multiple of PAGE_SIZE.
  virtual void *block_alloc(unsigned long size, unsigned long alignment) = 0;
  virtual void block_free(void *block, unsigned long size) = 0;

  slab_cache_anon(const slab_cache_anon&); // copy constructor remains undefined
  // Data declarations follow.

  // The slabs of this cache are held in a partially-sorted
  // doubly-linked list.  First come the fully-active slabs (all
  // elements in use), then the partially active slabs, then the
  // empty slabs.
  slab *_first_slab, *_first_available_slab, *_last_slab;
  unsigned long _slab_size;
  unsigned _entry_size, _elem_num;
  char const *_name;
  typedef Spin_lock<> Lock;
  Lock lock;
};
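// A hedged sketch (not part of the original file) of how a concrete
// cache could specialize slab_cache_anon.  The constructor signature
// matches the one defined below; "page_alloc"/"page_free" are
// hypothetical page-level primitives standing in for whatever the
// surrounding kernel provides.
//
//   class My_cache : public slab_cache_anon
//   {
//   public:
//     explicit My_cache(unsigned elem_size)
//       : slab_cache_anon(elem_size, 8, "my_cache",
//                         PAGE_SIZE, 32 * PAGE_SIZE)
//     {}
//
//     ~My_cache() { destroy(); }
//
//   protected:
//     virtual void *block_alloc(unsigned long size, unsigned long alignment)
//     { return page_alloc(size, alignment); }	// hypothetical primitive
//
//     virtual void block_free(void *block, unsigned long size)
//     { page_free(block, size); }		// hypothetical primitive
//   };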
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <lock_guard.h>

// The default deallocator must not be called -- use explicit
// destruction instead.
PUBLIC
void
slab::operator delete(void* /*block*/)
{
  assert (!"slab::operator delete called");
}
PUBLIC
slab::slab(slab_cache_anon *cache, void *mem)
  : _cache(cache), _next(0), _prev(0), _in_use(0)
{
  // Compute pointer to first data element, now taking into account
  // the latest colorization offset.
  char *data = reinterpret_cast<char*>(mem);

  // Initialize the cache elements: thread the free list through the
  // slab_entry header at the start of each element.
  slab_entry *e = 0, *e_prev = 0;

  for (unsigned i = 0; i < cache->_elem_num; i++)
    {
      e = reinterpret_cast<slab_entry *>(data);

      e->_next_free = e_prev;
      data += cache->_entry_size;

      e_prev = e;
    }

  // The element initialized last becomes the head of the free list.
  _first_free = e;
}
PUBLIC
void *
slab::alloc()
{
  // Pop the head of the free list, if any.
  slab_entry *e = _first_free;

  if (! e)
    return 0;

  _first_free = e->_next_free;
  _in_use++;

  return e;
}
PUBLIC
void
slab::free(void *entry)
{
  // Push the element back onto the free list.
  slab_entry *e = reinterpret_cast<slab_entry *>(entry);
  e->_next_free = _first_free;
  _first_free = e;

  assert(_in_use);
  _in_use--;
}
PUBLIC inline
bool
slab::is_empty() const
{
  return _in_use == 0;
}
PUBLIC inline
bool
slab::is_full() const
{
  return _in_use == _cache->_elem_num;
}
PUBLIC
void
slab::enqueue(slab *prev)
{
  assert(prev);

  // Link ourselves in directly behind "prev".
  if ((_next = prev->_next))
    _next->_prev = this;
  _prev = prev;
  _prev->_next = this;
}

PUBLIC
void
slab::dequeue()
{
  // Unlink ourselves from the doubly-linked list.
  if (_prev)
    _prev->_next = _next;
  if (_next)
    _next->_prev = _prev;

  _prev = _next = 0;
}
PUBLIC static
void *
slab::operator new(size_t, void *block) throw()
{
  // Slabs must be size-aligned so that we can compute their addresses
  // from element addresses.
  return block;
}
PRIVATE static inline
unsigned
slab_cache_anon::entry_size(unsigned elem_size, unsigned alignment)
{ return (elem_size + alignment - 1) & ~(alignment - 1); }
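// Worked example (illustration only): entry_size(20, 8) computes
// (20 + 7) & ~7 = 24, i.e. each element is padded to the next
// multiple of the alignment.  The bit trick requires the alignment
// to be a power of two.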
PUBLIC inline NEEDS[slab_cache_anon::entry_size]
slab_cache_anon::slab_cache_anon(unsigned elem_size,
                                 unsigned alignment,
                                 char const *name,
                                 unsigned long min_size,
                                 unsigned long max_size)
  : _first_slab(0), _first_available_slab(0), _last_slab(0),
    _entry_size(entry_size(elem_size, alignment)),
    _name(name)
{
  // Grow the slab size, starting at min_size, until a slab holds at
  // least eight elements or max_size is reached.  Doubling keeps
  // _slab_size a power of two, which free() relies on.
  for (_slab_size = min_size;
       (_slab_size - sizeof(slab)) / _entry_size < 8
         && _slab_size < max_size;
       _slab_size <<= 1)
    ;

  _elem_num = (_slab_size - sizeof(slab)) / _entry_size;
}
PUBLIC inline NEEDS[slab_cache_anon::entry_size]
slab_cache_anon::slab_cache_anon(unsigned long slab_size,
                                 unsigned elem_size,
                                 unsigned alignment,
                                 char const *name)
  : _first_slab(0), _first_available_slab(0), _last_slab(0),
    _slab_size(slab_size), _entry_size(entry_size(elem_size, alignment)),
    _name(name)
{
  _elem_num = (_slab_size - sizeof(slab)) / _entry_size;
}
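// Worked example (illustration only, assuming sizeof(slab) == 32 and
// PAGE_SIZE == 4096): with _slab_size = 4096 and _entry_size = 24,
// _elem_num = (4096 - 32) / 24 = 169 elements per slab; the slab
// header occupies the tail of the block (see alloc() below).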
PUBLIC virtual
slab_cache_anon::~slab_cache_anon()
{
  // The derived class should call destroy() before deleting us.
  // assert(_first_slab == 0);
}
PROTECTED
void
slab_cache_anon::destroy()	// descendant should call this in destructor
{
}
PUBLIC
void *
slab_cache_anon::alloc()	// request initialized member from cache
{
  Lock_guard<Lock> guard(&lock);

  if (! _first_available_slab)
    {
      // No slab with free elements left -- get a new block from the
      // low-level allocator.  Blocks are aligned to their own size so
      // that free() can recover the block base by masking.
      char *m = (char*)block_alloc(_slab_size, _slab_size);
      if (! m)
        return 0;

      // The slab header lives at the very end of the block.
      slab *s = new (m + _slab_size - sizeof(slab)) slab(this, m);

      _first_available_slab = s;

      if (_last_slab)
        {
          assert(_last_slab->is_full());

          _first_available_slab->enqueue(_last_slab);
          _last_slab = _first_available_slab;
        }
      else			// this was the first slab we allocated
        _first_slab = _last_slab = _first_available_slab;
    }

  assert(_first_available_slab
         && ! _first_available_slab->is_full());
  assert(! _first_available_slab->prev()
         || _first_available_slab->prev()->is_full());

  void *ret = _first_available_slab->alloc();

  if (_first_available_slab->is_full())
    _first_available_slab = _first_available_slab->next();

  return ret;
}
PUBLIC template< typename Q >
inline
void *
slab_cache_anon::q_alloc(Q *quota)
{
  // Charge the quota first; undo the charge if the allocation fails.
  if (EXPECT_FALSE(!quota->alloc(_entry_size)))
    return 0;

  void *r;
  if (EXPECT_FALSE(!(r = alloc())))
    {
      quota->free(_entry_size);
      return 0;
    }

  return r;
}
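// A hedged sketch of a quota type usable with q_alloc()/q_free().
// Only the interface actually used above is assumed: alloc() reports
// success as bool, free() returns the bytes.  "Simple_quota" is a
// hypothetical name for illustration.
//
//   class Simple_quota
//   {
//   public:
//     explicit Simple_quota(unsigned long limit)
//       : _limit(limit), _used(0)
//     {}
//
//     bool alloc(unsigned long size)
//     {
//       if (_used + size > _limit)
//         return false;		// would exceed the quota
//       _used += size;
//       return true;
//     }
//
//     void free(unsigned long size)
//     { _used -= size; }
//
//   private:
//     unsigned long _limit, _used;
//   };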
PUBLIC
void
slab_cache_anon::free(void *cache_entry) // return initialized member to cache
{
  Lock_guard<Lock> guard(&lock);

  // Recover the slab header from the entry address: blocks are
  // _slab_size-aligned and the header sits at the end of the block.
  slab *s = reinterpret_cast<slab*>
    ((reinterpret_cast<unsigned long>(cache_entry) & ~(_slab_size - 1))
     + _slab_size - sizeof(slab));
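  // Worked example (illustration only): with _slab_size = 0x1000 and
  // cache_entry == (void*)0x00347128, masking with ~0xFFF yields the
  // block base 0x00347000, and the slab header sits at
  // 0x00347000 + 0x1000 - sizeof(slab).  This is why blocks are
  // allocated with alignment == _slab_size, and why _slab_size must
  // be a power of two.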
  bool was_full = s->is_full();

  s->free(cache_entry);
  if (was_full)
    {
      if (s->next() == 0)	// have no right neighbor?
        {
          assert(! _first_available_slab);
        }
      else if (s->next()->is_full()) // right neighbor full?
        {
          // We requeue to become the first non-full slab in the queue
          // so that all full slabs are at the beginning of the queue.

          if (s == _first_slab)
            _first_slab = s->next();
          // don't care about _first_available_slab, _last_slab --
          // they cannot point to s because we were full and have a
          // right neighbor

          s->dequeue();

          if (_first_available_slab)
            {
              // _first_available_slab->prev() is defined because we
              // had a right neighbor which is full, that is,
              // _first_available_slab wasn't our right neighbor and
              // now isn't the first slab in the queue
              assert(_first_available_slab->prev()->is_full());
              s->enqueue(_first_available_slab->prev());
            }
          else
            {
              // all slabs were full
              assert(_last_slab->is_full());
              s->enqueue(_last_slab);
            }
        }

      _first_available_slab = s;
    }
  else if (s->is_empty())
    {
      if (s->next() && (! s->next()->is_empty())) // right neighbor not empty?
        {
          // Move to tail of list

          if (s == _first_slab)
            _first_slab = s->next();
          if (s == _first_available_slab)
            _first_available_slab = s->next();
          // don't care about _last_slab because we know we have a
          // right neighbor

          s->dequeue();

          s->enqueue(_last_slab);
          _last_slab = s;
        }
    }
  else
    {
      // We weren't either full or empty; we already had free
      // elements.  This changes nothing in the queue, and there
      // already must have been a _first_available_slab.

      assert(_first_available_slab);
    }
}
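// Illustration (not in the original source) of the queue invariant
// that free() maintains:
//
//   [full] ... [full] [partial] ... [partial] [empty] ... [empty]
//    ^ _first_slab     ^ _first_available_slab            ^ _last_slab
//
// free() moves at most one slab per call, so keeping the partial
// order costs O(1) per operation and alloc() can always start at
// _first_available_slab.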
PUBLIC template< typename Q >
inline
void
slab_cache_anon::q_free(Q *quota, void *obj)
{
  free(obj);
  quota->free(_entry_size);
}
PUBLIC
virtual unsigned long
slab_cache_anon::reap()		// request that cache returns memory to system
{
  Lock_guard<Lock> guard(&lock);

  if (! _first_slab)
    return 0;			// haven't allocated anything yet

  // never delete first slab, even if it is empty
  if (_last_slab == _first_slab
      || (! _last_slab->is_empty()))
    return 0;

  slab *s = _last_slab;

  if (_first_available_slab == s)
    _first_available_slab = 0;

  _last_slab = s->prev();
  s->dequeue();

  // explicitly call destructor to delete s, then hand the underlying
  // block back.  The block base is one _slab_size below the end of
  // the header, which itself ends exactly at the end of the block.
  s->~slab();
  block_free(reinterpret_cast<char *>(s + 1) - _slab_size, _slab_size);

  return _slab_size;
}
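// Usage sketch (illustration only): under memory pressure a caller
// can drain trailing empty slabs until reap() reports nothing left
// to return.  "some_cache" is a hypothetical cache instance.
//
//   unsigned long reclaimed = 0, n;
//   while ((n = some_cache->reap()))
//     reclaimed += n;		// each iteration frees one slab's bytes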
PUBLIC
void
slab_cache_anon::debug_dump()
{
  printf ("%s: %lu-KB slabs (elems per slab=%u ",
          _name, _slab_size / 1024, _elem_num);

  unsigned count, total = 0, total_elems = 0;
  slab* s = _first_slab;

  // First segment of the queue: full slabs.
  for (count = 0; s && s->is_full(); s = s->next())
    {
      count++;
      total_elems += s->in_use();
    }

  total += count;
  printf ("%u full, ", count);

  // Second segment: partially used slabs.
  for (count = 0;
       s && ! s->is_empty();
       s = s->next())
    {
      if (s->is_full())
        printf ("\n*** wrongly-enqueued full slab found\n");
      count++;
      total_elems += s->in_use();
    }

  total += count;
  printf ("%u used, ", count);

  // Tail of the queue: empty slabs.
  for (count = 0; s; s = s->next())
    {
      if (! s->is_empty())
        printf ("\n*** wrongly-enqueued nonempty slab found\n");
      count++;
      total_elems += s->in_use();
    }

  unsigned total_used = total;
  total += count;

  printf ("%u empty = %u total) = %lu KB,\n  %u elems (size=%u)",
          count, total, total * _slab_size / 1024,
          total_elems, _entry_size);

  if (total && total_used)	// avoid division by zero for empty caches
    printf (", overhead = %lu B (%lu B) = %lu%% (%lu%%) \n",
            total * _slab_size - total_elems * _entry_size,
            total_used * _slab_size - total_elems * _entry_size,
            100 - total_elems * _entry_size * 100 / (total * _slab_size),
            100 - total_elems * _entry_size * 100 / (total_used * _slab_size));
  else
    printf ("\n");
}
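// End-to-end usage sketch (illustration only), building on the
// hypothetical My_cache subclass sketched near the top of this file:
//
//   My_cache cache(sizeof(Foo));		// one cache per object size
//   void *raw = cache.alloc();			// grabs a slab on first use
//   if (raw)
//     {
//       Foo *f = new (raw) Foo();		// caller constructs in place
//       f->~Foo();				// ... and destroys explicitly
//       cache.free(raw);			// returns the element, O(1)
//     }
//   cache.reap();				// give empty slabs back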