// The anonymous slab allocator.  You can specialize this allocator by
// providing your own initialization functions and your own low-level
// allocation functions.
// Low-level allocator functions:

// Allocate/free a block.  "size" is always a multiple of PAGE_SIZE.
virtual void *block_alloc(unsigned long size, unsigned long alignment) = 0;
virtual void block_free(void *block, unsigned long size) = 0;
typedef Spin_lock Lock;
Lock lock;

slab_cache_anon();                         // default constructor is undefined
slab_cache_anon(const slab_cache_anon&);   // copy constructor is undefined

// data declaration follows
// The slabs of this cache are held in a partially-sorted
// doubly-linked list.  First come the fully-active slabs (all
// elements in use), then the partially active slabs, then the empty
// slabs.
slab *_first_slab, *_first_available_slab, *_last_slab;
unsigned long _slab_size;
unsigned _elem_size, _latest_offset, _alignment;
char const *_name;
}; // end declaration of class slab_cache_anon
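// A concrete cache specializes this allocator by overriding the two
// low-level block functions declared above.  Illustrative sketch only;
// "page_alloc"/"page_free" are hypothetical page-allocator hooks, not
// part of this file:
//
//   class My_cache : public slab_cache_anon
//   {
//   protected:
//     virtual void *block_alloc(unsigned long size, unsigned long alignment)
//     { return page_alloc(size, alignment); }   // hypothetical
//     virtual void block_free(void *block, unsigned long size)
//     { page_free(block, size); }                // hypothetical
//   };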
#include <lock_guard.h>

#ifndef offsetof                // should be defined in stddef.h, but isn't
#define offsetof(TYPE, MEMBER) (((size_t) &((TYPE *)10)->MEMBER) - 10)
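// The macro fakes an object at address 10 and asks where MEMBER ends up;
// subtracting 10 again yields the member's byte offset.  Illustrative
// example (the struct is hypothetical, not part of this file):
//
//   struct Example { void *link; char payload[0]; };
//   // offsetof(Example, payload) == sizeof(void*) on common ABIs
//
// slab::free() below relies on exactly this to step back from a payload
// pointer to its enclosing slab_entry.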
class slab                      // a slab of the cache
{
  slab();
  slab(const slab&);            // default and copy constructors remain undefined
  struct slab_entry
  {
    slab_entry *_next_free;
    char _entry[0];
  };

  struct slab_data
  {
    slab_cache_anon *_cache;
    slab_entry *_first_free;
    slab *_next, *_prev;
    unsigned short _in_use, _free;
  };
  // slabs for CACHE_ENTRY should contain at least min_cache_items
  // cache entries
  static const unsigned min_cache_items = 4;

  // data declaration follows
  slab_data _data;
}; // end declaration of class slab
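// Resulting in-memory layout of one slab block (illustrative only; actual
// field sizes depend on the architecture):
//
//   +-----------+----------------+------------+---------+------------+--...
//   | slab_data | color padding  | slab_entry | element | slab_entry |
//   | (header)  | (_latest_offset)| _next_free | payload | _next_free |
//   +-----------+----------------+------------+---------+------------+--...
//
// Every element payload starts "_alignment"-aligned; the per-cache color
// offset shifts the entry array so that different slabs place their first
// payloads in different cache lines.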
// default deallocator must not be called -- must use explicit destruction
slab::operator delete(void* /*block*/)
  assert (!"slab::operator delete called");
slab::slab(slab_cache_anon *cache)
  _data._cache = cache;
  _data._in_use = 0;
  _data._next = _data._prev = 0;

  // Compute offset of first slab_entry in slab, not taking into
  // account the colorization offset.  "slab_entry._entry[]" needs to
  // be "cache->_alignment"-aligned
  unsigned long offset_first_elem =
    ((sizeof(slab_data) + sizeof (slab_entry) + cache->_alignment - 1)
     & ~(cache->_alignment - 1))
    - sizeof (slab_entry);
  // Compute size of a slab entry, including alignment padding at end
  unsigned entry_size =
    (sizeof(slab_entry) + cache->_elem_size + cache->_alignment - 1)
    & ~(cache->_alignment - 1);
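  // Worked example (illustrative; assumes a 32-bit build where
  // sizeof(slab_data) == 20, sizeof(slab_entry) == 4, _alignment == 8,
  // _elem_size == 20):
  //   offset_first_elem = ((20 + 4 + 7) & ~7) - 4 = 24 - 4 = 20
  //     -> the first payload starts at 20 + 4 = 24, which is 8-aligned
  //   entry_size        = (4 + 20 + 7) & ~7 = 24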
  // Compute number of elements fitting into a slab
  unsigned elem_num =
    (cache->_slab_size - offset_first_elem) / entry_size;
  // Compute pointer to first data element, now taking into account
  // the latest colorization offset
  char *data =
    reinterpret_cast<char*>(this) + offset_first_elem + cache->_latest_offset;
  // Update the colorization offset
  cache->_latest_offset += cache->_alignment;
  if (offset_first_elem + cache->_latest_offset + entry_size * elem_num
      > cache->_slab_size)
    cache->_latest_offset = 0;
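  // Effect of the coloring, continuing the illustrative figures above:
  // with _alignment == 8, successive slabs place their first payload at
  // offsets 24, 32, 40, ... from the slab start, so the hot first elements
  // of different slabs fall into different cache lines; once the shift
  // would push the last entry past _slab_size, the offset wraps back to 0.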
  // Initialize the cache elements
  slab_entry *e = 0, *e_prev = 0;

  for (unsigned i = 0; i < elem_num; i++)
    {
      e = reinterpret_cast<slab_entry *>(data);

      e->_next_free = e_prev;
      cache->elem_ctor(& e->_entry[0]);

      e_prev = e;

      data +=
        (sizeof(slab_entry) + cache->_elem_size + cache->_alignment - 1)
        & ~(cache->_alignment - 1);
    }

  _data._first_free = e;
  _data._free = elem_num;
  assert(_data._in_use == 0);

  slab_entry *e = _data._first_free;

  while (e)
    {
      _data._cache->elem_dtor(& e->_entry[0]);
      e = e->_next_free;
    }
  slab_entry *e = _data._first_free;

  _data._first_free = e->_next_free;
slab::free(void *entry)
  slab_entry *e = reinterpret_cast<slab_entry *>
    (reinterpret_cast<char*>(entry) - offsetof(slab_entry, _entry));

  e->_next_free = _data._first_free;
  _data._first_free = e;

  assert(_data._in_use);
slab::is_empty()
  return _data._in_use == 0;

slab::is_full()
  return _data._free == 0;

slab::in_use()
  return _data._in_use;
slab::enqueue(slab *prev)
  if ((_data._next = prev->_data._next))
    _data._next->_data._prev = this;
  _data._prev = prev;
  _data._prev->_data._next = this;

slab::dequeue()
  if (_data._prev)
    _data._prev->_data._next = _data._next;
  if (_data._next)
    _data._next->_data._prev = _data._prev;

  _data._prev = _data._next = 0;
slab::operator new(size_t,
                   slab_cache_anon *cache) throw()
  // slabs must be size-aligned so that we can compute their addresses
  // from element addresses
  return cache->block_alloc(cache->_slab_size, cache->_slab_size);
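  // Why size == alignment here (illustrative): with _slab_size == 4096 a
  // slab always starts on a 4 KB boundary, so for any element pointer p
  // inside it,
  //
  //   slab *s = reinterpret_cast<slab *>(
  //       reinterpret_cast<unsigned long>(p) & ~(4096UL - 1));
  //
  // recovers the owning slab without any per-element back pointer.  This
  // is exactly what slab_cache_anon::free() does further below.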
slab::num_elems(unsigned long slab_size,
                unsigned elem_size,
                unsigned alignment)
  // Compute offset of first slab_entry in slab, not taking into
  // account the colorization offset.  "slab_entry._entry[]" needs to
  // be "alignment"-aligned
  unsigned long offset_first_elem =
    ((sizeof(slab::slab_data) + sizeof (slab::slab_entry) + alignment - 1)
     & ~(alignment - 1))
    - sizeof (slab::slab_entry);

  // Compute size of a slab entry, including alignment padding at end
  unsigned entry_size =
    (sizeof(slab::slab_entry) + elem_size + alignment - 1)
    & ~(alignment - 1);

  // Compute number of elements fitting into a slab
  return (slab_size - offset_first_elem) / entry_size;
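  // Continuing the 32-bit example from slab::slab(): num_elems(4096, 20, 8)
  // = (4096 - 20) / 24 = 169 elements per 4 KB slab (illustrative figures
  // only; the real sizes depend on the architecture).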
slab_cache_anon::num_elems(unsigned long slab_size,
                           unsigned elem_size,
                           unsigned alignment)
{ return slab::num_elems(slab_size, elem_size, alignment); }
slab_cache_anon::slab_cache_anon(unsigned elem_size,
                                 unsigned alignment,
                                 char const *name,
                                 unsigned long min_size,
                                 unsigned long max_size)
  : _first_slab(0), _first_available_slab(0), _last_slab(0),
    _elem_size(elem_size),
    _latest_offset(0), _alignment(alignment),
    _name(name)
  for (_slab_size = min_size;
       num_elems(_slab_size, elem_size, alignment) < 8
         && _slab_size < max_size;
       _slab_size <<= 1)        // grow the slab until at least 8 elements fit
    ;
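  // Illustrative sizing (32-bit figures as above): for elem_size == 512,
  // alignment == 8 and min_size == 4096, a 4 KB slab holds only
  // (4096 - 20) / 520 = 7 elements, so the constructor keeps enlarging
  // _slab_size until at least 8 elements fit (or max_size is reached).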
slab_cache_anon::slab_cache_anon(unsigned long slab_size,
                                 unsigned elem_size,
                                 unsigned alignment,
                                 char const *name)
  : _first_slab(0), _first_available_slab(0), _last_slab(0),
    _slab_size(slab_size), _elem_size(elem_size),
    _latest_offset(0), _alignment(alignment),
    _name(name)
slab_cache_anon::~slab_cache_anon()
  // the derived class should call destroy() before deleting us.
  assert(_first_slab == 0);
slab_cache_anon::destroy()      // descendant should call this in destructor
  slab *n, *s = _first_slab;

  while (s)
    {
      n = s->next();

      // explicitly call destructor to delete s;
      s->~slab();
      block_free(s, _slab_size);

      s = n;
    }

  _first_slab = 0;
slab_cache_anon::alloc()        // request initialized member from cache
  Lock_guard<Lock> guard(&lock);

  if (! _first_available_slab)
    {
      slab *s = new (this) slab(this);

      _first_available_slab = s;

      if (_last_slab)
        {
          assert(_last_slab->is_full());

          _first_available_slab->enqueue(_last_slab);
          _last_slab = _first_available_slab;
        }
      else                      // this was the first slab we allocated
        _first_slab = _last_slab = _first_available_slab;
    }
  assert(_first_available_slab
         && ! _first_available_slab->is_full());
  assert(! _first_available_slab->prev()
         || _first_available_slab->prev()->is_full());

  void *ret = _first_available_slab->alloc();

  if (_first_available_slab->is_full())
    _first_available_slab = _first_available_slab->next();
PUBLIC template< typename Q >
slab_cache_anon::q_alloc(Q *quota)
  if (EXPECT_FALSE(!quota->alloc(_elem_size)))
    return 0;

  void *r;
  if (EXPECT_FALSE(!(r=alloc())))
    {
      quota->free(_elem_size);
      return 0;
    }

  return r;
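// The quota type Q only needs two members: alloc(size), returning success,
// and free(size).  Hypothetical sketch of a caller (not part of this file):
//
//   struct Simple_quota
//   {
//     unsigned long left;
//     bool alloc(unsigned long s) { if (s > left) return false; left -= s; return true; }
//     void free(unsigned long s)  { left += s; }
//   };
//
//   // void *obj = my_cache.q_alloc(&quota);   // charged against the quota
//   // my_cache.q_free(&quota, obj);           // q_free() below refunds it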
slab_cache_anon::free(void *cache_entry) // return initialized member to cache
  Lock_guard<Lock> guard(&lock);

  slab *s = reinterpret_cast<slab*>
    (reinterpret_cast<unsigned long>(cache_entry) & ~(_slab_size - 1));

  bool was_full = s->is_full();

  s->free(cache_entry);
  if (s->next() == 0)            // have no right neighbor?
    assert(! _first_available_slab);

  else if (s->next()->is_full()) // right neighbor full?
    // We requeue to become the first non-full slab in the queue
    // so that all full slabs are at the beginning of the queue.

    if (s == _first_slab)
      _first_slab = s->next();
    // don't care about _first_available_slab, _last_slab --
    // they cannot point to s because we were full and have a
    // right neighbor
    if (_first_available_slab)
      {
        // _first_available_slab->prev() is defined because we
        // had a right neighbor which is full, that is,
        // _first_available_slab wasn't our right neighbor and
        // now isn't the first slab in the queue
        assert(_first_available_slab->prev()->is_full());
        s->enqueue(_first_available_slab->prev());
      }
    else
      {
        // all slabs were full
        assert(_last_slab->is_full());
        s->enqueue(_last_slab);
      }

    _first_available_slab = s;
  else if (s->is_empty())
    if (s->next() && (! s->next()->is_empty())) // right neighbor not empty?
      // Move to tail of list

      if (s == _first_slab)
        _first_slab = s->next();
      if (s == _first_available_slab)
        _first_available_slab = s->next();
      // don't care about _last_slab because we know we have a
      // right neighbor

      s->enqueue(_last_slab);
    // We were neither full nor empty; we already had free
    // elements.  This changes nothing in the queue, and there
    // already must have been a _first_available_slab.

    assert(_first_available_slab);
PUBLIC template< typename Q >
slab_cache_anon::q_free(Q *quota, void *obj)
  free(obj);
  quota->free(_elem_size);
virtual unsigned long
slab_cache_anon::reap()         // request that cache returns memory to system
  Lock_guard<Lock> guard(&lock);

  if (! _first_slab)
    return 0;                   // haven't allocated anything yet
  // never delete first slab, even if it is empty
  if (_last_slab == _first_slab
      || (! _last_slab->is_empty()))
    return 0;

  slab *s = _last_slab;
  if (_first_available_slab == s)
    _first_available_slab = 0;

  _last_slab = s->prev();
  s->dequeue();

  // explicitly call destructor to delete s;
  s->~slab();
  block_free(s, _slab_size);

  return _slab_size;
// Element initialization/destruction
slab_cache_anon::elem_ctor(void *)
{}

slab_cache_anon::elem_dtor(void *)
{}
slab_cache_anon::debug_dump ()
  printf ("%s: %lu-KB slabs (",
          _name, _slab_size / 1024);

  unsigned count, total = 0, total_elems = 0;
  slab* s = _first_slab;
  // count the leading run of full slabs
  for (count = 0; s && s->is_full(); s = s->next())
    {
      count++;
      total_elems += s->in_use();
    }
  total += count;

  printf ("%u full, ", count);
  // count the partially-used slabs that follow
  for (count = 0;
       s && ! s->is_empty();
       s = s->next())
    {
      if (s->is_full())
        printf ("\n*** wrongly-enqueued full slab found\n");
      count++;
      total_elems += s->in_use();
    }
  total += count;

  printf ("%u used, ", count);
  // the remaining slabs must all be empty
  for (count = 0; s; s = s->next())
    {
      if (! s->is_empty())
        printf ("\n*** wrongly-enqueued nonempty slab found\n");
      count++;
      total_elems += s->in_use();
    }
  unsigned total_used = total;  // full + partially-used slabs
  total += count;               // now include the empty ones

  printf ("%u empty = %u total) = %lu KB,\n  %u elems (size=%u, align=%u)",
          count, total, total * _slab_size / 1024,
          total_elems, _elem_size, _alignment);
  printf (", overhead = %lu B (%lu B) = %lu%% (%lu%%) \n",
          total * _slab_size - total_elems * _elem_size,
          total_used * _slab_size - total_elems * _elem_size,
          100 - total_elems * _elem_size * 100 / (total * _slab_size),
          100 - total_elems * _elem_size * 100 / (total_used * _slab_size));
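  // Example reading of the overhead line (illustrative numbers only): with
  // total == 10 slabs of 4096 B and total_elems == 1500 elements of
  // _elem_size == 20 B, overhead = 10*4096 - 1500*20 = 10960 B, and the
  // percentage is 100 - 1500*20*100 / (10*4096) = 27%.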