#include <auto_quota.h>

// The anonymous slab allocator. You can specialize this allocator by
// providing your own initialization functions and your own low-level
// allocation functions.

class Slab : public cxx::H_list_item
  Slab(const Slab&); // copy constructor remains undefined
  typedef cxx::S_list_item Slab_entry;
  typedef cxx::S_list<Slab_entry> Free_list;

  unsigned short _elems;
  unsigned short _in_use;
  // Low-level allocator functions:

  // Allocate/free a block. "size" is always a multiple of PAGE_SIZE.
  virtual void *block_alloc(unsigned long size, unsigned long alignment) = 0;
  virtual void block_free(void *block, unsigned long size) = 0;
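
  // Illustrative sketch (not part of the original sources): one way a
  // concrete cache could implement the two hooks above, here backed by
  // the C library's aligned_alloc()/free() from <cstdlib>.  The
  // constructor arguments assume the parameter list (elem_size,
  // alignment, name, min_size, max_size); inside the kernel the blocks
  // would come from the low-level page allocator instead.
  //
  //   class Demo_cache : public Slab_cache
  //   {
  //   public:
  //     explicit Demo_cache(unsigned elem_size)
  //     : Slab_cache(elem_size, 8, "demo", 4096, 8 * 4096) {}
  //
  //     // alloc() below always requests size == alignment == _slab_size,
  //     // so aligned_alloc() (size a multiple of alignment) fits.
  //     void *block_alloc(unsigned long size, unsigned long alignment) override
  //     { return aligned_alloc(alignment, size); }
  //
  //     void block_free(void *block, unsigned long) override
  //     { ::free(block); }
  //   };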
  Slab_cache(const Slab_cache&); // copy constructor remains undefined

  // data declarations follow
  typedef cxx::H_list<Slab> Slab_list;

  Slab_list _full;    ///< List of full slabs
  Slab_list _partial; ///< List of partially filled slabs
  Slab_list _empty;   ///< List of empty slabs

  unsigned long _slab_size;
  unsigned _entry_size, _elem_num;

  typedef Spin_lock<> Lock;
#include <lock_guard.h>
// The default deallocator must not be called -- slabs are destroyed
// explicitly instead.
Slab::operator delete(void* /*block*/)
  assert (!"Slab::operator delete called");
Slab::Slab(unsigned elems, unsigned entry_size, void *mem)
: _elems(elems), _in_use(0)
  // Compute the pointer to the first data element; the data area
  // starts right at `mem` (no colorization offset is applied).
  char *data = reinterpret_cast<char*>(mem);

  // Initialize the cache elements
  for (unsigned i = elems; i > 0; --i)
      Slab_entry *e = reinterpret_cast<Slab_entry *>(data);
  Slab_entry *e = _free.pop_front();
Slab::free(void *entry)
  _free.add(reinterpret_cast<Slab_entry *>(entry));
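
// Note: the free list is intrusive -- the freed entry's own storage is
// reinterpreted as the cxx::S_list_item link node, so no extra memory
// is needed to track free elements.  This implies every element must
// be at least sizeof(Slab_entry) bytes large and suitably aligned.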
Slab::is_empty() const

Slab::is_full() const
  return _in_use == _elems;
Slab::operator new(size_t, void *block) throw()
  // Slabs must be size-aligned so that we can compute their addresses
  // from element addresses.
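
// Resulting layout of one slab block (alloc() below requests blocks
// aligned to their own size):
//
//   block base                               block base + _slab_size
//   |                                                              |
//   v                                                              v
//   +--------+--------+-- ... --+----------+-----------------------+
//   | elem 0 | elem 1 |         | elem N-1 | Slab header           |
//   +--------+--------+-- ... --+----------+-----------------------+
//
// The header occupies the last sizeof(Slab) bytes of the block, so an
// element's slab is found by masking its address down to the block
// base (see Slab_cache::free() below).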
Slab_cache::entry_size(unsigned elem_size, unsigned alignment)
{ return (elem_size + alignment - 1) & ~(alignment - 1); }
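
// Worked example (alignment must be a power of two for the mask trick
// to be valid): entry_size(12, 8) = (12 + 7) & ~7 = 19 & ~7 = 16, i.e.
// each element is padded up to the next multiple of the alignment.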
PUBLIC inline NEEDS[Slab_cache::entry_size]
Slab_cache::Slab_cache(unsigned elem_size,
                       unsigned alignment, char const *name,
                       unsigned long min_size,
                       unsigned long max_size)
: _entry_size(entry_size(elem_size, alignment)), _num_empty(0),
  _name(name)
  // Grow the slab size until at least 8 elements fit (or max_size is
  // reached); doubling keeps it a power of two, as the address masking
  // in free() requires.
  for (_slab_size = min_size;
       (_slab_size - sizeof(Slab)) / _entry_size < 8
       && _slab_size < max_size;
       _slab_size <<= 1)
    ;

  _elem_num = (_slab_size - sizeof(Slab)) / _entry_size;
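
// Worked example: with _entry_size = 1024 and min_size = 4096, and
// taking sizeof(Slab) to be a few dozen bytes, a 4-KB slab holds only
// 3 elements and an 8-KB slab 7, so the loop settles on 16 KB with
// _elem_num = 15.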
Slab_cache::Slab_cache(unsigned long slab_size,
                       unsigned elem_size, unsigned alignment,
                       char const *name)
: _slab_size(slab_size), _entry_size(entry_size(elem_size, alignment)),
  _num_empty(0), _name(name)
  _elem_num = (_slab_size - sizeof(Slab)) / _entry_size;
Slab_cache::~Slab_cache()
  // The derived class should call destroy() before deleting us.
  // assert(_full.empty() && _partial.empty() && _empty.empty());
Slab_cache::destroy() // derived classes should call this from their destructors
PRIVATE inline NOEXPORT
Slab_cache::get_available_locked()
  Slab *s = _partial.front();
Slab_cache::alloc() // request initialized member from cache
  void *unused_block = 0;

    auto guard = lock_guard(lock);

    Slab *s = get_available_locked();

    if (EXPECT_FALSE(!s))
        char *m = static_cast<char *>(block_alloc(_slab_size, _slab_size));
        new_slab = new (m + _slab_size - sizeof(Slab)) Slab(_elem_num, _entry_size, m);

        // retry getting a slab that might have been allocated by a
        // different CPU in the meantime
        s = get_available_locked();

        _partial.add(new_slab);

  cxx::H_list<Slab>::remove(s);

  block_free(unused_block, _slab_size);
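
// The retry above handles a benign race: block_alloc() runs with the
// lock (presumably) dropped, so another CPU may have refilled the
// cache in the meantime.  When the retry does find a slab, the fresh
// block is remembered in unused_block and handed back to the system
// via block_free() once the lock is released.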
PUBLIC template< typename Q >
Slab_cache::q_alloc(Q *quota)
  Auto_quota<Ram_quota> q(quota, _entry_size);
  if (EXPECT_FALSE(!q))

  if (EXPECT_FALSE(!(r = alloc())))
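
// Auto_quota is an RAII charge: its constructor debits _entry_size
// from the quota (the object converts to false when the debit fails)
// and its destructor refunds the charge unless it is released after a
// successful allocation.  This keeps the quota exact on every
// early-exit path.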
Slab_cache::free(void *cache_entry) // return initialized member to cache
    auto guard = lock_guard(lock);

    Slab *s = reinterpret_cast<Slab*>
      ((reinterpret_cast<unsigned long>(cache_entry) & ~(_slab_size - 1))
       + _slab_size - sizeof(Slab));
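
    // Worked example: with _slab_size = 0x2000 (8 KB) and cache_entry
    // = 0x00345678, the block base is 0x00345678 & ~0x1fff =
    // 0x00344000, and the Slab header sits at 0x00344000 + 0x2000 -
    // sizeof(Slab).  This only works because alloc() requests blocks
    // aligned to their own (power-of-two) size.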
    bool was_full = s->is_full();

    s->free(cache_entry);

        cxx::H_list<Slab>::remove(s);
    else if (s->is_empty())
        cxx::H_list<Slab>::remove(s);

        // The slab was neither full nor empty, so it already had free
        // elements and already sits on the _partial list; nothing
        // needs to move between queues.
  block_free(reinterpret_cast<char *>(to_free + 1) - _slab_size, _slab_size);
PUBLIC template< typename Q >
Slab_cache::q_free(Q *quota, void *obj)
  quota->free(_entry_size);
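
// Usage sketch (illustrative names only): with a concrete subclass as
// outlined near block_alloc() above, a typed object cache is built on
// top with placement new:
//
//   void *raw = foo_cache.alloc();      // 0 when memory is exhausted
//   if (raw)
//     {
//       Foo *f = new (raw) Foo(...);    // construct in the cache slot
//       ...
//       f->~Foo();                      // destroy before returning it
//       foo_cache.free(f);
//     }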
Slab_cache::reap() // request that the cache return memory to the system
  unsigned long sz = 0;

    auto guard = lock_guard(lock);

  cxx::H_list<Slab>::remove(s);

  // explicitly call the destructor to delete s
  block_free(reinterpret_cast<char *>(s + 1) - _slab_size, _slab_size);
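
// reap() returns the number of bytes handed back (sz above) -- zero
// once nothing could be freed -- so a caller under memory pressure can
// drain all empty slabs with a simple loop:
//
//   while (some_cache.reap())
//     ;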
Slab_cache::debug_dump()
  printf ("%s: %lu-KB slabs (elems per slab=%u ",
          _name, _slab_size / 1024, _elem_num);
  unsigned count = 0, total = 0, total_elems = 0;
  for (Slab_list::Const_iterator s = _full.begin(); s != _full.end(); ++s)
        printf ("\n*** wrongly-enqueued non-full slab found in the full list\n");
      total_elems += s->in_use();

  printf ("%u full, ", count);
  for (Slab_list::Const_iterator s = _partial.begin(); s != _partial.end(); ++s)
      if (s->is_full() || s->is_empty())
        printf ("\n*** wrongly-enqueued full or empty slab found in the partial list\n");
      total_elems += s->in_use();

  printf ("%u used, ", count);
  for (Slab_list::Const_iterator s = _empty.begin(); s != _empty.end(); ++s)
        printf ("\n*** wrongly-enqueued nonempty slab found in the empty list\n");
  unsigned total_used = total;

  printf ("%u empty = %u total) = %lu KB,\n %u elems (size=%u)",
          count, total, total * _slab_size / 1024,
          total_elems, _entry_size);
  printf (", overhead = %lu B (%lu B) = %lu%% (%lu%%)\n",
          total * _slab_size - total_elems * _entry_size,
          total_used * _slab_size - total_elems * _entry_size,
          100 - total_elems * _entry_size * 100 / (total * _slab_size),
          100 - total_elems * _entry_size * 100 / (total_used * _slab_size));
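
// Worked example: 4 slabs of 8 KB holding 100 live elements of 64 B
// give overhead = 4 * 8192 - 100 * 64 = 26368 B, i.e. 100 - 6400 * 100
// / 32768 = 81% (integer arithmetic).  The parenthesized figures use
// total_used, i.e. they leave the empty slabs out of the computation.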