2 This is a version (aka dlmalloc) of malloc/free/realloc written by
3 Doug Lea and released to the public domain. Use, modify, and
4 redistribute this code without permission or acknowledgement in any
5 way you wish. Send questions, comments, complaints, performance
6 data, etc to dl@cs.oswego.edu
8 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
10 Note: There may be an updated version of this malloc obtainable at
11 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
12 Check before installing!
14 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
25 #include <bits/uClibc_mutex.h>
27 #include <l4/sys/consts.h>
30 __UCLIBC_MUTEX_EXTERN(__malloc_lock);
31 #define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
32 #define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
37 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
38 It must be a power of two at least 2 * (sizeof(size_t)), even on machines
39 for which smaller alignments would suffice. It may be defined as
40 larger than this though. Note however that code and data structures
41 are optimized for the case of 8-byte alignment.
43 #ifndef MALLOC_ALIGNMENT
44 #define MALLOC_ALIGNMENT (2 * (sizeof(size_t)))
47 /* The corresponding bit mask value */
48 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
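/*
  Illustrative sketch (not part of the original source): with the default
  8-byte alignment, MALLOC_ALIGN_MASK is 0x7, and rounding a size up to an
  aligned value is the usual mask-and-add idiom, the same one request2size()
  uses further below:

    static size_t align_up(size_t n)      // hypothetical helper
    {
        return (n + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
    }

    // e.g. align_up(13) == 16 and align_up(16) == 16 with 8-byte alignment
*/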
51 TRIM_FASTBINS controls whether free() of a very small chunk can
52 immediately lead to trimming. Setting to true (1) can reduce memory
53 footprint, but will almost always slow down programs that use a lot
56 Define this only if you are willing to give up some speed to more
57 aggressively reduce system-level memory footprint when releasing
58 memory in programs that use many small chunks. You can get
59 essentially the same effect by setting MXFAST to 0, but this can
60 lead to even greater slowdowns in programs using many small chunks.
61     TRIM_FASTBINS is an in-between compile-time option that prevents
62     only those chunks bordering topmost memory from being placed in
66 #define TRIM_FASTBINS 0
71 MORECORE-related declarations. By default, rely on sbrk
76 MORECORE is the name of the routine to call to obtain more memory
77 from the system. See below for general guidance on writing
78 alternative MORECORE functions, as well as a version for WIN32 and a
79 sample version for pre-OSX macos.
82 #define MORECORE(x) ({ -1; })
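/*
  Illustrative note (not from the original source): on a system with a
  working sbrk() the conventional definition would be

    #define MORECORE(x) sbrk(x)

  Here it is stubbed out to always return -1 (i.e. MORECORE_FAILURE below),
  which pushes all system memory acquisition onto the mmap() fallback path.
*/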
86 MORECORE_FAILURE is the value returned upon failure of MORECORE
87 as well as mmap. Since it cannot be an otherwise valid memory address,
88 and must reflect values of standard sys calls, you probably ought not
91 #ifndef MORECORE_FAILURE
92 #define MORECORE_FAILURE (-1)
96 If MORECORE_CONTIGUOUS is true, take advantage of fact that
97 consecutive calls to MORECORE with positive arguments always return
98 contiguous increasing addresses. This is true of unix sbrk. Even
99 if not defined, when regions happen to be contiguous, malloc will
100 permit allocations spanning regions obtained from different
101 calls. But defining this when applicable enables some stronger
102 consistency checks and space efficiencies.
104 #ifndef MORECORE_CONTIGUOUS
105 #define MORECORE_CONTIGUOUS 1
109 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
110 sbrk fails, and mmap is used as a backup (which is done only if
111 HAVE_MMAP). The value must be a multiple of page size. This
112 backup strategy generally applies only when systems have "holes" in
113 address space, so sbrk cannot perform contiguous expansion, but
114 there is still space available on system. On systems for which
115 this is known to be useful (i.e. most linux kernels), this occurs
116 only when programs allocate huge amounts of memory. Between this,
117 and the fact that mmap regions tend to be limited, the size should
118 be large, to avoid too many mmap calls and thus avoid running out
121 #ifndef MMAP_AS_MORECORE_SIZE
122 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
126 The system page size. To the extent possible, this malloc manages
127 memory from the system in page-size units. Note that this value is
128 cached during initialization into a field of malloc_state. So even
129 if malloc_getpagesize is a function, it is only called once.
131 The following mechanics for getpagesize were adapted from bsd/gnu
132 getpagesize.h. If none of the system-probes here apply, a value of
133 4096 is used, which should be OK: If they don't apply, then using
134 the actual value probably doesn't impact performance.
136 #ifndef malloc_getpagesize
137 //# include <unistd.h>
138 //# define malloc_getpagesize sysconf(_SC_PAGESIZE)
139 //#else /* just guess */
140 //# define malloc_getpagesize (4096)
141 # define malloc_getpagesize (L4_PAGESIZE)
145 /* mallopt tuning options */
148 M_MXFAST is the maximum request size used for "fastbins", special bins
149 that hold returned chunks without consolidating their spaces. This
150 enables future requests for chunks of the same size to be handled
151 very quickly, but can increase fragmentation, and thus increase the
152 overall memory footprint of a program.
154 This malloc manages fastbins very conservatively yet still
155 efficiently, so fragmentation is rarely a problem for values less
156 than or equal to the default. The maximum supported value of MXFAST
157 is 80. You wouldn't want it any higher than this anyway. Fastbins
158 are designed especially for use with many small structs, objects or
159 strings -- the default handles structs/objects/arrays with sizes up
160     to 16 4-byte fields, or small strings representing words, tokens,
161 etc. Using fastbins for larger objects normally worsens
162 fragmentation without improving speed.
164 M_MXFAST is set in REQUEST size units. It is internally used in
165 chunksize units, which adds padding and alignment. You can reduce
166 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
167 algorithm to be a closer approximation of fifo-best-fit in all cases,
168 not just for larger requests, but will generally cause it to be
173 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
178 #ifndef DEFAULT_MXFAST
179 #define DEFAULT_MXFAST 64
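/*
  Illustrative sketch (not part of the original source): assuming the usual
  SVID/XPG mallopt() interface from <malloc.h>, a program that prefers a
  smaller footprint over fastbin speed can shrink or disable fastbins as
  described above:

    #include <malloc.h>

    int tune_fastbins(void)               // hypothetical helper
    {
        return mallopt(M_MXFAST, 0);      // 0 disables fastbins entirely
    }
*/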
184 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
185 to keep before releasing via malloc_trim in free().
187 Automatic trimming is mainly useful in long-lived programs.
188 Because trimming via sbrk can be slow on some systems, and can
189 sometimes be wasteful (in cases where programs immediately
190 afterward allocate more large chunks) the value should be high
191 enough so that your overall system performance would improve by
192 releasing this much memory.
194 The trim threshold and the mmap control parameters (see below)
195 can be traded off with one another. Trimming and mmapping are
196 two different ways of releasing unused memory back to the
197 system. Between these two, it is often possible to keep
198 system-level demands of a long-lived program down to a bare
199 minimum. For example, in one test suite of sessions measuring
200 the XF86 X server on Linux, using a trim threshold of 128K and a
201 mmap threshold of 192K led to near-minimal long term resource
204 If you are using this malloc in a long-lived program, it should
205 pay to experiment with these values. As a rough guide, you
206 might set to a value close to the average size of a process
207 (program) running on your system. Releasing this much memory
208 would allow such a process to run in memory. Generally, it's
209     worth it to tune for trimming rather than memory mapping when a
210 program undergoes phases where several large chunks are
211 allocated and released in ways that can reuse each other's
212 storage, perhaps mixed with phases where there are no such
213 chunks at all. And in well-behaved long-lived programs,
214 controlling release of large blocks via trimming versus mapping
217 However, in most programs, these parameters serve mainly as
218 protection against the system-level effects of carrying around
219 massive amounts of unneeded memory. Since frequent calls to
220 sbrk, mmap, and munmap otherwise degrade performance, the default
221 parameters are set to relatively high values that serve only as
224 The trim value must be greater than page size to have any useful
225 effect. To disable trimming completely, you can set to
228 Trim settings interact with fastbin (MXFAST) settings: Unless
229 TRIM_FASTBINS is defined, automatic trimming never takes place upon
230 freeing a chunk with size less than or equal to MXFAST. Trimming is
231 instead delayed until subsequent freeing of larger chunks. However,
232 you can still force an attempted trim by calling malloc_trim.
234 Also, trimming is not generally possible in cases where
235 the main arena is obtained via mmap.
237 Note that the trick some people use of mallocing a huge space and
238 then freeing it at program startup, in an attempt to reserve system
239 memory, doesn't have the intended effect under automatic trimming,
240 since that memory will immediately be returned to the system.
242 #define M_TRIM_THRESHOLD -1
244 #ifndef DEFAULT_TRIM_THRESHOLD
245 #define DEFAULT_TRIM_THRESHOLD (256 * 1024)
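/*
  Illustrative sketch (not part of the original source): the XF86 X server
  figures quoted above would be applied through mallopt(), assuming the
  standard interface from <malloc.h>:

    #include <malloc.h>

    void tune_for_long_lived_server(void)       // hypothetical helper
    {
        mallopt(M_TRIM_THRESHOLD, 128 * 1024);  // trim when >128K sits unused at top
        mallopt(M_MMAP_THRESHOLD, 192 * 1024);  // mmap requests of 192K and larger
    }
*/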
249 M_TOP_PAD is the amount of extra `padding' space to allocate or
250 retain whenever sbrk is called. It is used in two ways internally:
252 * When sbrk is called to extend the top of the arena to satisfy
253 a new malloc request, this much padding is added to the sbrk
256 * When malloc_trim is called automatically from free(),
257 it is used as the `pad' argument.
259 In both cases, the actual amount of padding is rounded
260 so that the end of the arena is always a system page boundary.
262 The main reason for using padding is to avoid calling sbrk so
263 often. Having even a small pad greatly reduces the likelihood
264 that nearly every malloc request during program start-up (or
265 after trimming) will invoke sbrk, which needlessly wastes
268 Automatic rounding-up to page-size units is normally sufficient
269 to avoid measurable overhead, so the default is 0. However, in
270 systems where sbrk is relatively slow, it can pay to increase
271 this value, at the expense of carrying around more memory than
276 #ifndef DEFAULT_TOP_PAD
277 #define DEFAULT_TOP_PAD (0)
281 M_MMAP_THRESHOLD is the request size threshold for using mmap()
282 to service a request. Requests of at least this size that cannot
283 be allocated using already-existing space will be serviced via mmap.
284 (If enough normal freed space already exists it is used instead.)
286 Using mmap segregates relatively large chunks of memory so that
287 they can be individually obtained and released from the host
288 system. A request serviced through mmap is never reused by any
289 other request (at least not directly; the system may just so
290 happen to remap successive requests to the same locations).
292 Segregating space in this way has the benefits that:
294 1. Mmapped space can ALWAYS be individually released back
295 to the system, which helps keep the system level memory
296 demands of a long-lived program low.
297 2. Mapped memory can never become `locked' between
298 other chunks, as can happen with normally allocated chunks, which
299 means that even trimming via malloc_trim would not release them.
300 3. On some systems with "holes" in address spaces, mmap can obtain
301 memory that sbrk cannot.
303 However, it has the disadvantages that:
305 1. The space cannot be reclaimed, consolidated, and then
306 used to service later requests, as happens with normal chunks.
307 2. It can lead to more wastage because of mmap page alignment
309 3. It causes malloc performance to be more dependent on host
310 system memory management support routines which may vary in
311 implementation quality and may impose arbitrary
312 limitations. Generally, servicing a request via normal
313 malloc steps is faster than going through a system's mmap.
315 The advantages of mmap nearly always outweigh disadvantages for
316 "large" chunks, but the value of "large" varies across systems. The
317 default is an empirically derived value that works well in most
320 #define M_MMAP_THRESHOLD -3
322 #ifndef DEFAULT_MMAP_THRESHOLD
323 #define DEFAULT_MMAP_THRESHOLD (256 * 1024)
327 M_MMAP_MAX is the maximum number of requests to simultaneously
328 service using mmap. This parameter exists because
329 . Some systems have a limited number of internal tables for
330 use by mmap, and using more than a few of them may degrade
333 The default is set to a value that serves only as a safeguard.
334 Setting to 0 disables use of mmap for servicing large requests. If
335 HAVE_MMAP is not set, the default value is 0, and attempts to set it
336 to non-zero values in mallopt will fail.
338 #define M_MMAP_MAX -4
340 #ifndef DEFAULT_MMAP_MAX
341 #define DEFAULT_MMAP_MAX (65536)
345 /* ------------------ MMAP support ------------------ */
347 #include <sys/mman.h>
349 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
350 #define MAP_ANONYMOUS MAP_ANON
353 #ifdef __ARCH_USE_MMU__
355 #define MMAP(addr, size, prot) \
356 (mmap((addr), (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, 0, 0))
360 #define MMAP(addr, size, prot) \
361 (mmap((addr), (size), (prot), MAP_SHARED|MAP_ANONYMOUS|MAP_UNINITIALIZE, 0, 0))
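/*
  Illustrative sketch (not part of the original source): the allocator
  obtains anonymous memory through this wrapper roughly as follows; the
  size is always rounded to a multiple of the page size first:

    char *mm = (char *) MMAP(0, size, PROT_READ | PROT_WRITE);
    if (mm != (char *) MAP_FAILED) {
        // got a usable, page-aligned region of `size' bytes
    }
*/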
366 /* ----------------------- Chunk representations ----------------------- */
370 This struct declaration is misleading (but accurate and necessary).
371 It declares a "view" into memory allowing access to necessary
372 fields at known offsets from a given base. See explanation below.
375 struct malloc_chunk {
377 size_t prev_size; /* Size of previous chunk (if free). */
378 size_t size; /* Size in bytes, including overhead. */
380 struct malloc_chunk* fd; /* double links -- used only if free. */
381 struct malloc_chunk* bk;
385 typedef struct malloc_chunk* mchunkptr;
388 malloc_chunk details:
390 (The following includes lightly edited explanations by Colin Plumb.)
392 Chunks of memory are maintained using a `boundary tag' method as
393 described in e.g., Knuth or Standish. (See the paper by Paul
394 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
395 survey of such techniques.) Sizes of free chunks are stored both
396 in the front of each chunk and at the end. This makes
397 consolidating fragmented chunks into bigger chunks very fast. The
398 size fields also hold bits representing whether chunks are free or
401 An allocated chunk looks like this:
404 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
405 | Size of previous chunk, if allocated | |
406 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
407 | Size of chunk, in bytes |P|
408 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
409 | User data starts here... .
411 . (malloc_usable_space() bytes) .
413 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
415 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
418 Where "chunk" is the front of the chunk for the purpose of most of
419 the malloc code, but "mem" is the pointer that is returned to the
420 user. "Nextchunk" is the beginning of the next contiguous chunk.
422     Chunks always begin on even word boundaries, so the mem portion
423 (which is returned to the user) is also on an even word boundary, and
424 thus at least double-word aligned.
426 Free chunks are stored in circular doubly-linked lists, and look like this:
428 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
429 | Size of previous chunk |
430 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
431 `head:' | Size of chunk, in bytes |P|
432 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
433 | Forward pointer to next chunk in list |
434 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
435 | Back pointer to previous chunk in list |
436 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
437 | Unused space (may be 0 bytes long) .
440 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
441 `foot:' | Size of chunk, in bytes |
442 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
444 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
445 chunk size (which is always a multiple of two words), is an in-use
446 bit for the *previous* chunk. If that bit is *clear*, then the
447 word before the current chunk size contains the previous chunk
448 size, and can be used to find the front of the previous chunk.
449 The very first chunk allocated always has this bit set,
450 preventing access to non-existent (or non-owned) memory. If
451 prev_inuse is set for any given chunk, then you CANNOT determine
452 the size of the previous chunk, and might even get a memory
453 addressing fault when trying to do so.
455 Note that the `foot' of the current chunk is actually represented
456 as the prev_size of the NEXT chunk. This makes it easier to
457 deal with alignments etc but can be very confusing when trying
458 to extend or adapt this code.
460 The two exceptions to all this are
462 1. The special chunk `top' doesn't bother using the
463 trailing size field since there is no next contiguous chunk
464 that would have to index off it. After initialization, `top'
465 is forced to always exist. If it would become less than
466 MINSIZE bytes long, it is replenished.
468 2. Chunks allocated via mmap, which have the second-lowest-order
469 bit (IS_MMAPPED) set in their size fields. Because they are
470 allocated one-by-one, each must contain its own trailing size field.
475 ---------- Size and alignment checks and conversions ----------
478 /* conversion from malloc headers to user pointers, and back */
480 #define chunk2mem(p) ((void*)((char*)(p) + 2*(sizeof(size_t))))
481 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*(sizeof(size_t))))
483 /* The smallest possible chunk */
484 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
486 /* The smallest size we can malloc is an aligned minimal chunk */
489 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
491 /* Check if m has acceptable alignment */
493 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
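/*
  Illustrative sketch (not part of the original source): a user pointer and
  its chunk header are exactly two size_t words apart, so the conversions
  are plain pointer arithmetic and the round trip is exact:

    void *mem = malloc(10);               // any pointer this malloc returned
    mchunkptr p = mem2chunk(mem);         // step back over prev_size and size
    // chunk2mem(p) == mem, and aligned_OK(chunk2mem(p)) holds by construction
*/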
496 /* Check if a request is so large that it would wrap around zero when
497 padded and aligned. To simplify some other code, the bound is made
498     low enough so that adding MINSIZE will also not wrap around zero.
501 #define REQUEST_OUT_OF_RANGE(req) \
502 ((unsigned long)(req) >= \
503 (unsigned long)(size_t)(-2 * MINSIZE))
505 /* pad request bytes into a usable size -- internal version */
507 #define request2size(req) \
508 (((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK < MINSIZE) ? \
510 ((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
512 /* Same, except also perform argument check */
514 #define checked_request2size(req, sz) \
515 if (REQUEST_OUT_OF_RANGE(req)) { \
519 (sz) = request2size(req);
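/*
  Illustrative sketch (not part of the original source): with 4-byte size_t
  and 8-byte alignment (so MINSIZE == 16), request2size() behaves as:

    request2size(1)  == 16   // too small: bumped up to MINSIZE
    request2size(12) == 16   // 12 + 4 bytes of overhead, already aligned
    request2size(13) == 24   // 13 + 4 = 17, rounded up to the next multiple of 8
    request2size(24) == 32
*/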
522 --------------- Physical chunk operations ---------------
526 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
527 #define PREV_INUSE 0x1
529 /* extract inuse bit of previous chunk */
530 #define prev_inuse(p) ((p)->size & PREV_INUSE)
533 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
534 #define IS_MMAPPED 0x2
536 /* check for mmap()'ed chunk */
537 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
539 /* Bits to mask off when extracting size
541 Note: IS_MMAPPED is intentionally not masked off from size field in
542 macros for which mmapped chunks should never be seen. This should
543 cause helpful core dumps to occur if it is tried by accident by
544 people extending or adapting this malloc.
546 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
548 /* Get size, ignoring use bits */
549 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
552 /* Ptr to next physical malloc_chunk. */
553 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
555 /* Ptr to previous physical malloc_chunk */
556 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
558 /* Treat space at ptr + offset as a chunk */
559 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
561 /* extract p's inuse bit */
563 ((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
565 /* set/clear chunk as being inuse without otherwise disturbing */
566 #define set_inuse(p)\
567 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
569 #define clear_inuse(p)\
570 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
573 /* check/set/clear inuse bits in known places */
574 #define inuse_bit_at_offset(p, s)\
575 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
577 #define set_inuse_bit_at_offset(p, s)\
578 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
580 #define clear_inuse_bit_at_offset(p, s)\
581 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
584 /* Set size at head, without disturbing its use bit */
585 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
587 /* Set size/use field */
588 #define set_head(p, s) ((p)->size = (s))
590 /* Set size at footer (only when chunk is not in use) */
591 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
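/*
  Illustrative sketch (not part of the original source): a simplified view
  of how the macros above cooperate when free() turns a valid, non-mmapped,
  in-arena chunk p into a free chunk and writes its boundary tags (the real
  free() also handles forward merging, the top chunk, and bin insertion):

    size_t sz = chunksize(p);              // size with flag bits masked off
    mchunkptr nxt = chunk_at_offset(p, sz);

    if (!prev_inuse(p)) {                  // chunk before p is free: coalesce backward
        sz += p->prev_size;
        p = prev_chunk(p);
        // (the real code also unlink()s that old free neighbor from its bin)
    }
    clear_inuse_bit_at_offset(nxt, 0);     // tell nxt that its predecessor is now free
    set_head(p, sz | PREV_INUSE);          // new size; the chunk before p is in use
    set_foot(p, sz);                       // duplicate the size at the foot (nxt->prev_size)
*/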
594 /* -------------------- Internal data structures -------------------- */
599 An array of bin headers for free chunks. Each bin is doubly
600 linked. The bins are approximately proportionally (log) spaced.
601 There are a lot of these bins (128). This may look excessive, but
602 works very well in practice. Most bins hold sizes that are
603 unusual as malloc request sizes, but are more usual for fragments
604 and consolidated sets of chunks, which is what these bins hold, so
605 they can be found quickly. All procedures maintain the invariant
606 that no consolidated chunk physically borders another one, so each
607     chunk in a list is known to be preceded and followed by either
608 inuse chunks or the ends of memory.
610 Chunks in bins are kept in size order, with ties going to the
611 approximately least recently used chunk. Ordering isn't needed
612 for the small bins, which all contain the same-sized chunks, but
613 facilitates best-fit allocation for larger chunks. These lists
614 are just sequential. Keeping them in order almost never requires
615 enough traversal to warrant using fancier ordered data
618 Chunks of the same size are linked with the most
619 recently freed at the front, and allocations are taken from the
620 back. This results in LRU (FIFO) allocation order, which tends
621 to give each chunk an equal opportunity to be consolidated with
622 adjacent freed chunks, resulting in larger free chunks and less
625 To simplify use in double-linked lists, each bin header acts
626 as a malloc_chunk. This avoids special-casing for headers.
627 But to conserve space and improve locality, we allocate
628 only the fd/bk pointers of bins, and then use repositioning tricks
629 to treat these as the fields of a malloc_chunk*.
632 typedef struct malloc_chunk* mbinptr;
634 /* addressing -- note that bin_at(0) does not exist */
635 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - ((sizeof(size_t))<<1)))
637 /* analog of ++bin */
638 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
640 /* Reminders about list directionality within bins */
641 #define first(b) ((b)->fd)
642 #define last(b) ((b)->bk)
644 /* Take a chunk off a bin list */
645 #define unlink(P, BK, FD) { \
648 if (FD->bk != P || BK->fd != P) \
657 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
658 8 bytes apart. Larger bins are approximately logarithmically spaced:
665 2 bins of size 262144
666 1 bin of size what's left
668 The bins top out around 1MB because we expect to service large
673 #define NSMALLBINS 32
674 #define SMALLBIN_WIDTH 8
675 #define MIN_LARGE_SIZE 256
677 #define in_smallbin_range(sz) \
678 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
680 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
682 #define bin_index(sz) \
683 ((in_smallbin_range(sz)) ? smallbin_index(sz) : __malloc_largebin_index(sz))
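/*
  Illustrative sketch (not part of the original source): with 8-byte-spaced
  small bins the index is simply the chunk size divided by 8:

    smallbin_index(16)  == 2     // smallest real chunk size -> first usable bin
    smallbin_index(64)  == 8
    smallbin_index(248) == 31    // largest small-bin size
    bin_index(256)               // == __malloc_largebin_index(256): no longer "small"
*/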
686 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
687 first bin that is maintained in sorted order. This must
688 be the smallest size corresponding to a given bin.
690 Normally, this should be MIN_LARGE_SIZE. But you can weaken
691     best fit guarantees to sometimes speed up malloc by increasing its value.
692 Doing this means that malloc may choose a chunk that is
693 non-best-fitting by up to the width of the bin.
695 Some useful cutoff values:
696 512 - all bins sorted
697 2560 - leaves bins <= 64 bytes wide unsorted
698 12288 - leaves bins <= 512 bytes wide unsorted
699 65536 - leaves bins <= 4096 bytes wide unsorted
700 262144 - leaves bins <= 32768 bytes wide unsorted
701 -1 - no bins sorted (not recommended!)
704 #define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
705 /* #define FIRST_SORTED_BIN_SIZE 65536 */
710 All remainders from chunk splits, as well as all returned chunks,
711 are first placed in the "unsorted" bin. They are then placed
712 in regular bins after malloc gives them ONE chance to be used before
713 binning. So, basically, the unsorted_chunks list acts as a queue,
714 with chunks being placed on it in free (and __malloc_consolidate),
715 and taken off (to be either used or placed in bins) in malloc.
718 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
719 #define unsorted_chunks(M) (bin_at(M, 1))
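/*
  Illustrative sketch (not part of the original source): because the bin
  header acts as a chunk, putting a freed chunk p at the front of the
  unsorted list is the standard circular doubly-linked splice (av being
  the malloc_state):

    mchunkptr bck = unsorted_chunks(av);   // the bin header itself
    mchunkptr fwd = bck->fd;               // current first chunk (or the header)
    p->bk = bck;
    p->fd = fwd;
    fwd->bk = p;
    bck->fd = p;
*/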
724 The top-most available chunk (i.e., the one bordering the end of
725 available memory) is treated specially. It is never included in
726 any bin, is used only if no other chunk is available, and is
727 released back to the system if it is very large (see
728 M_TRIM_THRESHOLD). Because top initially
729 points to its own bin with initial zero size, thus forcing
730 extension on the first malloc request, we avoid having any special
731 code in malloc to check whether it even exists yet. But we still
732     need to do so when getting memory from the system, so we make
733 initial_top treat the bin as a legal but unusable chunk during the
734 interval between initialization and the first call to
735 __malloc_alloc. (This is somewhat delicate, since it relies on
736 the 2 preceding words to be zero during this interval as well.)
739 /* Conveniently, the unsorted bin can be used as dummy top on first call */
740 #define initial_top(M) (unsorted_chunks(M))
745 To help compensate for the large number of bins, a one-level index
746 structure is used for bin-by-bin searching. `binmap' is a
747 bitvector recording whether bins are definitely empty so they can
748     be skipped over during traversals. The bits are NOT always
749 cleared as soon as bins are empty, but instead only
750 when they are noticed to be empty during traversal in malloc.
753 /* Conservatively use 32 bits per map word, even if on 64bit system */
754 #define BINMAPSHIFT 5
755 #define BITSPERMAP (1U << BINMAPSHIFT)
756 #define BINMAPSIZE (NBINS / BITSPERMAP)
758 #define idx2block(i) ((i) >> BINMAPSHIFT)
759 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
761 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
762 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
763 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
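/*
  Illustrative sketch (not part of the original source): each map word
  covers 32 bins, so bin i lives in word idx2block(i) at bit idx2bit(i):

    mark_bin(av, i);            // set when a chunk is placed into bin i
    if (!get_binmap(av, i)) {
        // bin i is definitely empty: skip it without touching its list
    } else if (first(bin_at(av, i)) == bin_at(av, i)) {
        unmark_bin(av, i);      // noticed empty during traversal: cleared lazily
    }
*/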
768 An array of lists holding recently freed small chunks. Fastbins
769 are not doubly linked. It is faster to single-link them, and
770 since chunks are never removed from the middles of these lists,
771 double linking is not necessary. Also, unlike regular bins, they
772 are not even processed in FIFO order (they use faster LIFO) since
773 ordering doesn't much matter in the transient contexts in which
774 fastbins are normally used.
776 Chunks in fastbins keep their inuse bit set, so they cannot
777 be consolidated with other free chunks. __malloc_consolidate
778 releases all chunks in fastbins and consolidates them with
782 typedef struct malloc_chunk* mfastbinptr;
784 /* offset 2 to use otherwise unindexable first 2 bins */
785 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
787 /* The maximum fastbin request size we support */
788 #define MAX_FAST_SIZE 80
790 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
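/*
  Illustrative sketch (not part of the original source): with 4-byte size_t,
  chunk sizes eligible for fastbins are 16, 24, ..., 88 bytes, so:

    fastbin_index(16) == 0                  // smallest chunk size -> fastbin 0
    fastbin_index(24) == 1
    fastbin_index(request2size(80)) == fastbin_index(88) == 9
    // hence NFASTBINS == 10, one bin per 8-byte size class
*/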
793 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
794 that triggers automatic consolidation of possibly-surrounding
795 fastbin chunks. This is a heuristic, so the exact value should not
796 matter too much. It is defined at half the default trim threshold as a
797 compromise heuristic to only attempt consolidation if it is likely
798 to lead to trimming. However, it is not dynamically tunable, since
799     consolidation reduces fragmentation surrounding large chunks even
800 if trimming is not used.
803 #define FASTBIN_CONSOLIDATION_THRESHOLD \
804 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
807 Since the lowest 2 bits in max_fast don't matter in size comparisons,
808 they are used as flags.
812 ANYCHUNKS_BIT held in max_fast indicates that there may be any
813 freed chunks at all. It is set true when entering a chunk into any
817 #define ANYCHUNKS_BIT (1U)
819 #define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
820 #define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
821 #define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
824 FASTCHUNKS_BIT held in max_fast indicates that there are probably
825 some fastbin chunks. It is set true on entering a chunk into any
826 fastbin, and cleared only in __malloc_consolidate.
829 #define FASTCHUNKS_BIT (2U)
831 #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
832 #define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
833 #define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
835 /* Set value of max_fast. Use impossibly small value if 0. */
836 #define set_max_fast(M, s) \
837 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
838 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
840 #define get_max_fast(M) \
841 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
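/*
  Illustrative sketch (not part of the original source): max_fast packs a
  chunk-size limit and two status bits, so it is always read and written
  through the accessors above:

    set_max_fast(av, DEFAULT_MXFAST);   // 64-byte requests -> 72-byte chunk limit (32-bit)
    if (have_fastchunks(av))
        __malloc_consolidate(av);       // as free() does for large-enough chunks
    // get_max_fast(av) strips FASTCHUNKS_BIT/ANYCHUNKS_BIT before size comparisons
*/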
845 morecore_properties is a status word holding dynamically discovered
846 or controlled properties of the morecore function
849 #define MORECORE_CONTIGUOUS_BIT (1U)
851 #define contiguous(M) \
852 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
853 #define noncontiguous(M) \
854 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
855 #define set_contiguous(M) \
856 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
857 #define set_noncontiguous(M) \
858 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
862 ----------- Internal state representation and initialization -----------
865 struct malloc_state {
867 /* The maximum chunk size to be eligible for fastbin */
868 size_t max_fast; /* low 2 bits used as flags */
871 mfastbinptr fastbins[NFASTBINS];
873 /* Base of the topmost chunk -- not otherwise kept in a bin */
876 /* The remainder from the most recent split of a small request */
877 mchunkptr last_remainder;
879 /* Normal bins packed as described above */
880 mchunkptr bins[NBINS * 2];
882 /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
883 unsigned int binmap[BINMAPSIZE+1];
885 /* Tunable parameters */
886 unsigned long trim_threshold;
888 size_t mmap_threshold;
890 /* Memory map support */
895 /* Cache malloc_getpagesize */
896 unsigned int pagesize;
898 /* Track properties of MORECORE */
899 unsigned int morecore_properties;
904 size_t max_sbrked_mem;
905 size_t max_mmapped_mem;
906 size_t max_total_mem;
909 typedef struct malloc_state *mstate;
912 There is exactly one instance of this struct in this malloc.
913 If you are adapting this malloc in a way that does NOT use a static
914 malloc_state, you MUST explicitly zero-fill it before using. This
915 malloc relies on the property that malloc_state is initialized to
916 all zeroes (as is true of C statics).
918 extern struct malloc_state __malloc_state; /* never directly referenced */
921 All uses of av_ are via get_malloc_state().
922 At most one "call" to get_malloc_state is made per invocation of
923 the public versions of malloc and free, but other routines
924     that in turn invoke malloc and/or free may call it more than once.
925 Also, it is called in check* routines if __UCLIBC_MALLOC_DEBUGGING__ is set.
928 #define get_malloc_state() (&(__malloc_state))
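/*
  Illustrative sketch (not part of the original source): a public entry
  point typically grabs the lock once, fetches the single state instance,
  and releases the lock before returning, roughly:

    void *malloc(size_t bytes)
    {
        mstate av;
        void *mem = NULL;

        __MALLOC_LOCK;
        av = get_malloc_state();
        // ... satisfy the request from av's fastbins, bins, or top ...
        __MALLOC_UNLOCK;
        return mem;
    }
*/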
930 /* External internal utilities operating on mstates */
931 void __malloc_consolidate(mstate) attribute_hidden;
934 /* Debugging support */
935 #ifndef __UCLIBC_MALLOC_DEBUGGING__
937 #define check_chunk(P)
938 #define check_free_chunk(P)
939 #define check_inuse_chunk(P)
940 #define check_remalloced_chunk(P,N)
941 #define check_malloced_chunk(P,N)
942 #define check_malloc_state()
943 #define assert(x) ((void)0)
948 #define check_chunk(P) __do_check_chunk(P)
949 #define check_free_chunk(P) __do_check_free_chunk(P)
950 #define check_inuse_chunk(P) __do_check_inuse_chunk(P)
951 #define check_remalloced_chunk(P,N) __do_check_remalloced_chunk(P,N)
952 #define check_malloced_chunk(P,N) __do_check_malloced_chunk(P,N)
953 #define check_malloc_state() __do_check_malloc_state()
955 extern void __do_check_chunk(mchunkptr p);
956 extern void __do_check_free_chunk(mchunkptr p);
957 extern void __do_check_inuse_chunk(mchunkptr p);
958 extern void __do_check_remalloced_chunk(mchunkptr p, size_t s);
959 extern void __do_check_malloced_chunk(mchunkptr p, size_t s);
960 extern void __do_check_malloc_state(void);