5 // The anonymous slab allocator. You can specialize this allocator by
6 // providing your own initialization functions and your own low-level
7 // allocation functions.
slab(const slab&); // copy constructors remain undefined

// Free-list link: every unused element slot holds a pointer to the
// next free slot of the same slab (0 terminates the list).
slab_entry *_next_free;

// Cache this slab belongs to; supplies the element size and the
// per-slab element count used below.
slab_cache_anon *_cache;
// Head of this slab's free-element list (0 once the slab is full).
slab_entry *_first_free;
// Number of elements currently handed out from this slab
// (0 = empty, _cache->_elem_num = full; see is_full()).
unsigned short _in_use;
// Low-level allocator functions:

// Allocate/free a block. "size" is always a multiple of PAGE_SIZE.
// block_alloc() must honor "alignment": the cache requests blocks
// aligned to the slab size (see alloc()) so that free() can recover a
// slab's address from any element address by masking the low bits.
virtual void *block_alloc(unsigned long size, unsigned long alignment) = 0;
virtual void block_free(void *block, unsigned long size) = 0;
slab_cache_anon(const slab_cache_anon&); // copy constructor remains undefined

// data declaration follows

// The slabs of this cache are held in a partially-sorted
// doubly-linked list. First come the fully-active slabs (all
// elements in use), then the partially active slabs, then empty
// slabs.
slab *_first_slab, *_first_available_slab, *_last_slab;
// Byte size of one slab block; must be a power of two, because
// free() recovers the slab from an element address via
// "addr & ~(_slab_size - 1)".
unsigned long _slab_size;
// _entry_size: element size rounded up to alignment (see entry_size()).
// _elem_num:   elements that fit in one slab beside the slab struct.
unsigned _entry_size, _elem_num;
// Lock protecting the slab list and all per-slab free lists.
typedef Spin_lock<> Lock;
63 #include <lock_guard.h>
// default deallocator must not be called -- must use explicit destruction
slab::operator delete(void* /*block*/)
// Unconditionally trips an assertion: a slab object lives inside its
// own storage block, which is released via block_free() (see reap()),
// never through operator delete.
assert (!"slab::operator delete called");
// Construct a slab over the memory block "mem": thread a slab_entry
// free-list through every element slot of the block.
// NOTE(review): the comment below mentions a colorization offset, but
// no offset is added to "data" on the visible lines -- the offset
// computation may sit on an elided line; confirm in the full file.
slab::slab(slab_cache_anon *cache, void *mem)
: _cache(cache), _next(0), _prev(0), _in_use(0)
// Compute pointer to first data element, now taking into account
// the latest colorization offset
char *data = reinterpret_cast<char*>(mem);

// Initialize the cache elements: each slot is linked to the
// previously initialized one, so the finished free list runs from
// the last element of the block back to the first.
slab_entry *e = 0, *e_prev = 0;

for (unsigned i = 0; i < cache->_elem_num; i++)
e = reinterpret_cast<slab_entry *>(data);

e->_next_free = e_prev;
// Advance by the aligned element size to the next slot.
data += cache->_entry_size;
// (presumably slab::alloc -- signature elided in this view)
// Pop the head of this slab's free list; the popped slot's storage is
// the element handed out to the caller.
slab_entry *e = _first_free;

// Unlink: the next free slot becomes the new list head.
_first_free = e->_next_free;
// Return one element to this slab: push its slot back onto the head
// of the intra-slab free list.
slab::free(void *entry)
slab_entry *e = reinterpret_cast<slab_entry *>(entry);
// The freed slot links to the previous list head; presumably
// "_first_free = e" and the _in_use decrement follow on elided lines.
e->_next_free = _first_free;
// True iff no element of this slab is currently allocated.
slab::is_empty() const

// True iff every element of this slab is currently allocated.
slab::is_full() const
return _in_use == _cache->_elem_num;
// Insert this slab into the cache's doubly-linked list directly
// behind "prev".
slab::enqueue(slab *prev)
// Adopt prev's successor (0 at the list tail); the matching back-link
// fixups sit on elided lines.
if ((_next = prev->_next))

// (presumably slab::dequeue -- signature elided) Unlink this slab
// from its neighbors.
// NOTE(review): null checks for _prev/_next presumably guard these
// statements on the elided lines -- confirm in the full file.
_prev->_next = _next;

_next->_prev = _prev;
// Placement new: a slab object is constructed at a caller-chosen
// address inside its own storage block (see slab_cache_anon::alloc()).
slab::operator new(size_t, void *block) throw()
// slabs must be size-aligned so that we can compute their addresses
// from element addresses (see the mask arithmetic in
// slab_cache_anon::free())
194 slab_cache_anon::entry_size(unsigned elem_size, unsigned alignment)
195 { return (elem_size + alignment - 1) & ~(alignment - 1); }
// Construct a cache, choosing the slab size automatically: start at
// min_size and grow until one slab holds at least 8 elements or
// max_size is reached.  The surrounding loop header is split across
// elided lines -- presumably doubling _slab_size each iteration, since
// it must stay a power of two; confirm in the full file.  The
// "alignment" parameter fed to entry_size() is also on an elided line.
PUBLIC inline NEEDS[slab_cache_anon::entry_size]
slab_cache_anon::slab_cache_anon(unsigned elem_size,
unsigned long min_size,
unsigned long max_size)
: _first_slab(0), _first_available_slab(0), _last_slab(0),
_entry_size(entry_size(elem_size, alignment)),
// Start with the smallest permitted slab size ...
_slab_size = min_size;
// ... and keep growing while a slab would hold fewer than 8 elements
// (the slab bookkeeping struct occupies part of each block).
(_slab_size - sizeof(slab)) / _entry_size < 8
&& _slab_size < max_size;
// Final capacity: how many aligned elements fit beside the slab struct.
_elem_num = (_slab_size - sizeof(slab)) / _entry_size;
// Construct a cache with a caller-fixed slab size (the elem_size and
// alignment parameters are on elided lines).
slab_cache_anon::slab_cache_anon(unsigned long slab_size,
: _first_slab(0), _first_available_slab(0), _last_slab(0),
_slab_size(slab_size), _entry_size(entry_size(elem_size, alignment)),
// Elements per slab: what fits beside the trailing slab struct.
_elem_num = (_slab_size - sizeof(slab)) / _entry_size;
// Destructor.  Freeing the slabs needs the derived class's
// block_free() (pure virtual here), which can no longer be dispatched
// to once the derived part is destroyed -- hence the requirement below.
slab_cache_anon::~slab_cache_anon()
// the derived class should call destroy() before deleting us.
// assert(_first_slab == 0);
// Release the cache's slabs back to the system (body elided in this
// view); must run while the derived class's block_free() is still alive.
slab_cache_anon::destroy() // descendant should call this in destructor
// Allocate one element from the cache.  If no slab has a free slot,
// a new slab-size-aligned block is obtained from block_alloc() and a
// slab object is placement-constructed in its last sizeof(slab) bytes;
// the new slab is then linked behind the full slabs.
slab_cache_anon::alloc() // request initialized member from cache
// Block to hand back to the system if it turns out we did not need it
// -- presumably set on an elided path (e.g. a concurrent refill while
// the lock was dropped around block_alloc); confirm in the full file.
void *unused_block = 0;

Lock_guard<Lock> guard(&lock);

if (EXPECT_FALSE(!_first_available_slab))
// Request slab-size alignment so free() can recover the slab address
// from an element address by masking.
char *m = (char*)block_alloc(_slab_size, _slab_size);
// Slab bookkeeping lives at the tail of the block; the elements
// occupy [m, m + _slab_size - sizeof(slab)).
slab *s = new (m + _slab_size - sizeof(slab)) slab(this, m);

// Re-check: an available slab may have appeared in the meantime
// (see unused_block above).
if (EXPECT_TRUE(!_first_available_slab))
_first_available_slab = s;
// Any pre-existing slabs must all be full, or we would not be here.
assert(_last_slab->is_full());
// Keep the queue invariant (full slabs first): the fresh, empty slab
// goes behind the current tail.
_first_available_slab->enqueue(_last_slab);
_last_slab = _first_available_slab;
else // this was the first slab we allocated
_first_slab = _last_slab = _first_available_slab;

// Invariants: a non-full slab is available, and everything to its
// left in the queue is full.
assert(_first_available_slab && ! _first_available_slab->is_full());
assert(! _first_available_slab->prev() || _first_available_slab->prev()->is_full());

ret = _first_available_slab->alloc();

// If that slab just filled up, advance to its right neighbor
// (partially used, empty, or 0).
if (_first_available_slab->is_full())
_first_available_slab = _first_available_slab->next();

// Return a block we allocated but did not end up using (presumably
// guarded by a null check on an elided line).
block_free(unused_block, _slab_size);
// Quota-charged allocation: charge _entry_size against "quota" first,
// then allocate; a failed allocation rolls the charge back.
PUBLIC template< typename Q >
slab_cache_anon::q_alloc(Q *quota)
// Fail fast if the quota cannot cover one element.
if (EXPECT_FALSE(!quota->alloc(_entry_size)))

// Allocation failed after the quota was charged: refund it.
if (EXPECT_FALSE(!(r=alloc())))
quota->free(_entry_size);
// Return one element to the cache.  The owning slab is recovered from
// the element address: blocks are slab-size aligned, so masking the
// low bits yields the block start, and the slab object sits
// sizeof(slab) bytes before the block's end.  Afterwards the slab is
// requeued as needed to keep the list order: full, then partial, then
// empty slabs.
// NOTE(review): the branches below handle the was_full and is_empty
// transitions; the enclosing "if (was_full)" itself is on an elided
// line -- confirm against the full file.
slab_cache_anon::free(void *cache_entry) // return initialized member to cache
Lock_guard<Lock> guard(&lock);

// Map the element address back to its slab's bookkeeping object.
slab *s = reinterpret_cast<slab*>
((reinterpret_cast<unsigned long>(cache_entry) & ~(_slab_size - 1)) + _slab_size - sizeof(slab));

// Record the pre-free state: a full slab becoming non-full must be
// requeued past all remaining full slabs.
bool was_full = s->is_full();

s->free(cache_entry);

if (s->next() == 0) // have no right neighbor?
// We are the last slab; since we were full, every slab was full.
assert(! _first_available_slab);
else if (s->next()->is_full()) // right neighbor full?
// We requeue to become the first non-full slab in the queue
// so that all full slabs are at the beginning of the queue.
if (s == _first_slab)
_first_slab = s->next();
// don't care about _first_available_slab, _last_slab --
// they cannot point to s because we were full and have a

if (_first_available_slab)
// _first_available_slab->prev() is defined because we
// had a right neighbor which is full, that is,
// _first_available_slab wasn't our right neighbor and
// now isn't the first slab in the queue
assert(_first_available_slab->prev()->is_full());
s->enqueue(_first_available_slab->prev());

// all slabs were full
assert(_last_slab->is_full());
s->enqueue(_last_slab);

// Either way, s is now the leftmost slab with a free slot.
_first_available_slab = s;

else if (s->is_empty())
if (s->next() && (! s->next()->is_empty())) // right neighbor not empty?
// Move to tail of list
if (s == _first_slab)
_first_slab = s->next();
if (s == _first_available_slab)
_first_available_slab = s->next();
// don't care about _last_slab because we know we have a

s->enqueue(_last_slab);

// We weren't either full or empty; we already had free
// elements. This changes nothing in the queue, and there
// already must have been a _first_available_slab.
assert(_first_available_slab);
// Quota-tracked free: return the element to the cache, then refund
// its _entry_size charge to "quota".
PUBLIC template< typename Q >
slab_cache_anon::q_free(Q *quota, void *obj)
quota->free(_entry_size);
// Release memory back to the system: if the last slab in the queue
// (empty slabs sort to the tail) is empty and is not the only slab,
// unlink it, destroy it, and free its block.  Returns the number of
// bytes released (0 if nothing could be freed).
virtual unsigned long
slab_cache_anon::reap() // request that cache returns memory to system
Lock_guard<Lock> guard(&lock);

return 0; // haven't allocated anything yet

// never delete first slab, even if it is empty
if (_last_slab == _first_slab
|| (! _last_slab->is_empty()))

slab *s = _last_slab;

// If this slab was the leftmost available one, nothing with free
// slots remains after it goes away.
if (_first_available_slab == s)
_first_available_slab = 0;

_last_slab = s->prev();

// explicitly call destructor to delete s;
// NOTE(review): "s" points at the slab object, which alloc() places
// at the END of its block (m + _slab_size - sizeof(slab)); passing
// "s" to block_free() therefore does not look like the block's start
// address.  Either the adjustment is on an elided line or this is a
// bug -- verify against the full file.
block_free(s, _slab_size);
// Diagnostic dump: walks the slab queue in its three ordered sections
// (full, partially used, empty), counting slabs and in-use elements,
// and flags slabs that violate the queue's sort invariant.
slab_cache_anon::debug_dump()
// NOTE(review): _elem_num is unsigned, so "%u" would be the matching
// conversion instead of "%d" (harmless in practice for sane values).
printf ("%s: %lu-KB slabs (elems per slab=%d ",
_name, _slab_size / 1024, _elem_num);

unsigned count, total = 0, total_elems = 0;
slab* s = _first_slab;

// Section 1: full slabs at the head of the queue.
total_elems += s->in_use();

printf ("%u full, ", count);

// Section 2: partially used slabs (non-empty, not full).
s && ! s->is_empty();

// A full slab after the first non-full one breaks the sort invariant.
printf ("\n*** wrongly-enqueued full slab found\n");

total_elems += s->in_use();

printf ("%u used, ", count);

// Section 3: empty slabs at the tail; anything non-empty here is
// mis-sorted.
printf ("\n*** wrongly-enqueued nonempty slab found\n");

total_elems += s->in_use();

// Snapshot before the empty-slab count is folded in: slabs that hold
// at least one element, used for the second overhead figure below.
unsigned total_used = total;

printf ("%u empty = %u total) = %lu KB,\n %u elems (size=%u)",
count, total, total * _slab_size / 1024,
total_elems, _entry_size);

// NOTE(review): divides by total/total_used -- presumably an early
// return for an empty cache sits on an elided line; otherwise this
// divides by zero when no slab exists.
printf (", overhead = %lu B (%lu B) = %lu%% (%lu%%) \n",
total * _slab_size - total_elems * _entry_size,
total_used * _slab_size - total_elems * _entry_size,
100 - total_elems * _entry_size * 100 / (total * _slab_size),
100 - total_elems * _entry_size * 100 / (total_used * _slab_size));