/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc. to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
        ftp://gee.cs.oswego.edu/pub/misc/malloc.c
        Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include <bits/uClibc_mutex.h>

#ifndef L4_MINIMAL_LIBC
#include <l4/sys/consts.h>
#endif

__UCLIBC_MUTEX_EXTERN(__malloc_lock);
#define __MALLOC_LOCK    __UCLIBC_MUTEX_LOCK(__malloc_lock)
#define __MALLOC_UNLOCK  __UCLIBC_MUTEX_UNLOCK(__malloc_lock)

/*
  MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
  It must be a power of two, at least 2 * (sizeof(size_t)), even on
  machines for which smaller alignments would suffice. It may be
  defined as larger than this though. Note however that code and data
  structures are optimized for the case of 8-byte alignment.
*/
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT       (2 * (sizeof(size_t)))
#endif

/* The corresponding bit mask value */
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)

/*
  TRIM_FASTBINS controls whether free() of a very small chunk can
  immediately lead to trimming. Setting to true (1) can reduce memory
  footprint, but will almost always slow down programs that use a lot
  of small chunks.

  Define this only if you are willing to give up some speed to more
  aggressively reduce system-level memory footprint when releasing
  memory in programs that use many small chunks. You can get
  essentially the same effect by setting MXFAST to 0, but this can
  lead to even greater slowdowns in programs using many small chunks.
  TRIM_FASTBINS is an in-between compile-time option, that disables
  only those chunks bordering topmost memory from being placed in
  fastbins.
*/
#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS  0
#endif

/*
  MORECORE-related declarations. By default, rely on sbrk.

  MORECORE is the name of the routine to call to obtain more memory
  from the system. See below for general guidance on writing
  alternative MORECORE functions, as well as a version for WIN32 and a
  sample version for pre-OSX macos.
*/
#define MORECORE(x) ({ -1; })

/*
  MORECORE_FAILURE is the value returned upon failure of MORECORE
  as well as mmap. Since it cannot be an otherwise valid memory address,
  and must reflect values of standard sys calls, you probably ought not
  try to redefine it.
*/
#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif
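
/*
  Example (illustrative sketch, not from dlmalloc): on this port
  MORECORE always fails, so all memory comes from MMAP below. A port
  without sbrk that still wanted a working MORECORE could carve
  increments out of a static arena; `arena_morecore' and the arena
  size are hypothetical names chosen for the example.
*/
#if 0 /* example only */
static char example_arena[1 << 20];       /* 1 MiB backing store */
static size_t example_arena_used;

static void *arena_morecore(long increment)
{
    if (increment < 0
        || example_arena_used + (size_t)increment > sizeof(example_arena))
        return (void *)MORECORE_FAILURE;  /* same failure value as sbrk */
    void *p = example_arena + example_arena_used;
    example_arena_used += (size_t)increment; /* contiguous, like sbrk */
    return p;
}
#endif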

/*
  If MORECORE_CONTIGUOUS is true, take advantage of the fact that
  consecutive calls to MORECORE with positive arguments always return
  contiguous increasing addresses. This is true of unix sbrk. Even
  if not defined, when regions happen to be contiguous, malloc will
  permit allocations spanning regions obtained from different
  calls. But defining this when applicable enables some stronger
  consistency checks and space efficiencies.
*/
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
  sbrk fails, and mmap is used as a backup (which is done only if
  HAVE_MMAP). The value must be a multiple of page size. This
  backup strategy generally applies only when systems have "holes" in
  address space, so sbrk cannot perform contiguous expansion, but
  there is still space available on the system. On systems for which
  this is known to be useful (i.e. most linux kernels), this occurs
  only when programs allocate huge amounts of memory. Between this,
  and the fact that mmap regions tend to be limited, the size should
  be large, to avoid too many mmap calls and thus avoid running out
  of kernel resources.
*/
#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/*
  The system page size. To the extent possible, this malloc manages
  memory from the system in page-size units. Note that this value is
  cached during initialization into a field of malloc_state. So even
  if malloc_getpagesize is a function, it is only called once.

  The following mechanics for getpagesize were adapted from bsd/gnu
  getpagesize.h. If none of the system-probes here apply, a value of
  4096 is used, which should be OK: if they don't apply, then using
  the actual value probably doesn't impact performance.
*/
#ifndef malloc_getpagesize
//# include <unistd.h>
//# define malloc_getpagesize sysconf(_SC_PAGESIZE)
//#else /* just guess */
#ifdef L4_MINIMAL_LIBC
# define malloc_getpagesize (4096)
#else
# define malloc_getpagesize (L4_PAGESIZE)
#if L4_PAGESIZE != 4096
# error Adapt this code for minimal libc version
#endif
#endif
#endif

/* mallopt tuning options */

/*
  M_MXFAST is the maximum request size used for "fastbins", special bins
  that hold returned chunks without consolidating their spaces. This
  enables future requests for chunks of the same size to be handled
  very quickly, but can increase fragmentation, and thus increase the
  overall memory footprint of a program.

  This malloc manages fastbins very conservatively yet still
  efficiently, so fragmentation is rarely a problem for values less
  than or equal to the default. The maximum supported value of MXFAST
  is 80. You wouldn't want it any higher than this anyway. Fastbins
  are designed especially for use with many small structs, objects or
  strings -- the default handles structs/objects/arrays with sizes up
  to 16 4-byte fields, or small strings representing words, tokens,
  etc. Using fastbins for larger objects normally worsens
  fragmentation without improving speed.

  M_MXFAST is set in REQUEST size units. It is internally used in
  chunksize units, which adds padding and alignment. You can reduce
  M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
  algorithm to be a closer approximation of fifo-best-fit in all cases,
  not just for larger requests, but will generally cause it to be
  slower.
*/

/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     64
#endif

/*
  M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
  to keep before releasing via malloc_trim in free().

  Automatic trimming is mainly useful in long-lived programs.
  Because trimming via sbrk can be slow on some systems, and can
  sometimes be wasteful (in cases where programs immediately
  afterward allocate more large chunks) the value should be high
  enough so that your overall system performance would improve by
  releasing this much memory.

  The trim threshold and the mmap control parameters (see below)
  can be traded off with one another. Trimming and mmapping are
  two different ways of releasing unused memory back to the
  system. Between these two, it is often possible to keep
  system-level demands of a long-lived program down to a bare
  minimum. For example, in one test suite of sessions measuring
  the XF86 X server on Linux, using a trim threshold of 128K and a
  mmap threshold of 192K led to near-minimal long term resource
  consumption.

  If you are using this malloc in a long-lived program, it should
  pay to experiment with these values. As a rough guide, you
  might set to a value close to the average size of a process
  (program) running on your system. Releasing this much memory
  would allow such a process to run in memory. Generally, it's
  worth it to tune for trimming rather than memory mapping when a
  program undergoes phases where several large chunks are
  allocated and released in ways that can reuse each other's
  storage, perhaps mixed with phases where there are no such
  chunks at all. And in well-behaved long-lived programs,
  controlling release of large blocks via trimming versus mapping
  is usually faster.

  However, in most programs, these parameters serve mainly as
  protection against the system-level effects of carrying around
  massive amounts of unneeded memory. Since frequent calls to
  sbrk, mmap, and munmap otherwise degrade performance, the default
  parameters are set to relatively high values that serve only as
  safeguards.

  The trim value must be greater than page size to have any useful
  effect. To disable trimming completely, you can set to
  (unsigned long)(-1)

  Trim settings interact with fastbin (MXFAST) settings: Unless
  TRIM_FASTBINS is defined, automatic trimming never takes place upon
  freeing a chunk with size less than or equal to MXFAST. Trimming is
  instead delayed until subsequent freeing of larger chunks. However,
  you can still force an attempted trim by calling malloc_trim.

  Also, trimming is not generally possible in cases where
  the main arena is obtained via mmap.

  Note that the trick some people use of mallocing a huge space and
  then freeing it at program startup, in an attempt to reserve system
  memory, doesn't have the intended effect under automatic trimming,
  since that memory will immediately be returned to the system.
*/
#define M_TRIM_THRESHOLD       -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
#endif

/*
  M_TOP_PAD is the amount of extra `padding' space to allocate or
  retain whenever sbrk is called. It is used in two ways internally:

  * When sbrk is called to extend the top of the arena to satisfy
    a new malloc request, this much padding is added to the sbrk
    request.

  * When malloc_trim is called automatically from free(),
    it is used as the `pad' argument.

  In both cases, the actual amount of padding is rounded
  so that the end of the arena is always a system page boundary.

  The main reason for using padding is to avoid calling sbrk so
  often. Having even a small pad greatly reduces the likelihood
  that nearly every malloc request during program start-up (or
  after trimming) will invoke sbrk, which needlessly wastes
  time.

  Automatic rounding-up to page-size units is normally sufficient
  to avoid measurable overhead, so the default is 0. However, in
  systems where sbrk is relatively slow, it can pay to increase
  this value, at the expense of carrying around more memory than
  is needed.
*/
#define M_TOP_PAD              -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
  M_MMAP_THRESHOLD is the request size threshold for using mmap()
  to service a request. Requests of at least this size that cannot
  be allocated using already-existing space will be serviced via mmap.
  (If enough normal freed space already exists it is used instead.)

  Using mmap segregates relatively large chunks of memory so that
  they can be individually obtained and released from the host
  system. A request serviced through mmap is never reused by any
  other request (at least not directly; the system may just so
  happen to remap successive requests to the same locations).

  Segregating space in this way has the benefits that:

   1. Mmapped space can ALWAYS be individually released back
      to the system, which helps keep the system level memory
      demands of a long-lived program low.
   2. Mapped memory can never become `locked' between
      other chunks, as can happen with normally allocated chunks, which
      means that even trimming via malloc_trim would not release them.
   3. On some systems with "holes" in address spaces, mmap can obtain
      memory that sbrk cannot.

  However, it has the disadvantages that:

   1. The space cannot be reclaimed, consolidated, and then
      used to service later requests, as happens with normal chunks.
   2. It can lead to more wastage because of mmap page alignment
      requirements.
   3. It causes malloc performance to be more dependent on host
      system memory management support routines which may vary in
      implementation quality and may impose arbitrary
      limitations. Generally, servicing a request via normal
      malloc steps is faster than going through a system's mmap.

  The advantages of mmap nearly always outweigh disadvantages for
  "large" chunks, but the value of "large" varies across systems. The
  default is an empirically derived value that works well in most
  systems.
*/
#define M_MMAP_THRESHOLD      -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
#endif

/*
  M_MMAP_MAX is the maximum number of requests to simultaneously
  service using mmap. This parameter exists because:

  . Some systems have a limited number of internal tables for
    use by mmap, and using more than a few of them may degrade
    performance.

  The default is set to a value that serves only as a safeguard.
  Setting to 0 disables use of mmap for servicing large requests. If
  HAVE_MMAP is not set, the default value is 0, and attempts to set it
  to non-zero values in mallopt will fail.
*/
#define M_MMAP_MAX             -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif
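
/*
  Example (illustrative sketch, not from dlmalloc): the M_* constants
  above are the standard mallopt parameter numbers, so the tunables
  can be adjusted at runtime. The particular values are examples, not
  recommendations; mallopt returns nonzero on success.
*/
#if 0 /* example only */
static int tune_allocator(void)
{
    int ok = 1;
    ok &= mallopt(M_MXFAST, 0);                  /* disable fastbins      */
    ok &= mallopt(M_TRIM_THRESHOLD, 128 * 1024); /* trim top more eagerly */
    ok &= mallopt(M_TOP_PAD, 0);                 /* no extra sbrk padding */
    ok &= mallopt(M_MMAP_THRESHOLD, 192 * 1024); /* mmap big requests     */
    ok &= mallopt(M_MMAP_MAX, 1024);             /* cap concurrent mmaps  */
    return ok;
}
#endif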

/* ------------------ MMAP support ------------------ */

#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __ARCH_USE_MMU__

#define MMAP(addr, size, prot) \
 (mmap((addr), (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS, 0, 0))

#else

#define MMAP(addr, size, prot) \
 (mmap((addr), (size), (prot), MAP_SHARED|MAP_ANONYMOUS|MAP_UNINITIALIZE, 0, 0))

#endif

/* ----------------------- Chunk representations ----------------------- */

/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base. See explanation below.
*/

struct malloc_chunk {

  size_t      prev_size;  /* Size of previous chunk (if free).  */
  size_t      size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;

/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish. (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.) Sizes of free chunks are stored both
    in the front of each chunk and at the end. This makes
    consolidating fragmented chunks into bigger chunks very fast. The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user. "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk. If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory. If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The two exceptions to all this are

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it. After initialization, `top'
        is forced to always exist. If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit (IS_MMAPPED) set in their size fields. Because they are
        allocated one-by-one, each must contain its own trailing size field.
*/

/*
  ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)   ((void*)((char*)(p) + 2*(sizeof(size_t))))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*(sizeof(size_t))))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))

/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */
#define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
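
/*
  Example (illustrative): chunk2mem and mem2chunk are exact inverses,
  stepping over the two size_t header fields (prev_size and size), and
  the resulting user pointer satisfies aligned_OK whenever the chunk
  itself is aligned.
*/
#if 0 /* example only */
static void header_roundtrip(mchunkptr p)
{
    void *mem = chunk2mem(p);        /* skip prev_size and size  */
    assert(mem2chunk(mem) == p);     /* ...and step back again   */
    assert(aligned_OK(mem));         /* holds for aligned chunks */
}
#endif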

/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned. To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
*/
#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long)(req) >=                                        \
   (unsigned long)(size_t)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */
#define request2size(req)                                         \
  (((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK < MINSIZE)  ?    \
   MINSIZE :                                                      \
   ((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/* Same, except also perform argument check */
#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE(req)) {                                \
    errno = ENOMEM;                                               \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);
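
/*
  Worked example (illustrative, assumes a 32-bit target where
  sizeof(size_t) == 4, MALLOC_ALIGNMENT == 8 and MINSIZE == 16): a
  request is padded with one size_t of overhead, rounded up to the
  alignment, and never smaller than MINSIZE.
*/
#if 0 /* example only */
static void request2size_examples(void)
{
    assert(request2size(1)  == 16);  /* below MINSIZE: rounded up to it */
    assert(request2size(12) == 16);  /* 12 + 4 = 16, already aligned    */
    assert(request2size(13) == 24);  /* 13 + 4 = 17, rounds up to 24    */
    assert(request2size(20) == 24);  /* 20 + 4 = 24, already aligned    */
}
#endif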

/*
  --------------- Physical chunk operations ---------------
*/

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->size & PREV_INUSE)

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/*
   Bits to mask off when extracting size

   Note: IS_MMAPPED is intentionally not masked off from size field in
   macros for which mmapped chunks should never be seen. This should
   cause helpful core dumps to occur if it is tried by accident by
   people extending or adapting this malloc.
*/
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Get size, ignoring use bits */
#define chunksize(p)        ((p)->size & ~(SIZE_BITS))

/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/* extract p's inuse bit */
#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use field */
#define set_head(p, s)       ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
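
/*
  Example (illustrative sketch, not a function in this file): how the
  boundary-tag macros compose when splitting an oversized free chunk,
  as malloc does. `split_chunk', `victim' and `nb' are hypothetical
  names; nb is the aligned size wanted for the allocation.
*/
#if 0 /* example only */
static mchunkptr split_chunk(mchunkptr victim, size_t nb)
{
    size_t size = chunksize(victim);
    mchunkptr remainder = chunk_at_offset(victim, nb);

    /* Head of the allocated piece keeps its existing PREV_INUSE bit. */
    set_head_size(victim, nb);

    /* The remainder is free: its head records that the previous chunk
       (victim) is now in use, and its size is duplicated in the footer
       so a later free of the next chunk can coalesce backwards. */
    set_head(remainder, (size - nb) | PREV_INUSE);
    set_foot(remainder, size - nb);

    return remainder;
}
#endif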

/* -------------------- Internal data structures -------------------- */

/*
  Bins

    An array of bin headers for free chunks. Each bin is doubly
    linked. The bins are approximately proportionally (log) spaced.
    There are a lot of these bins (128). This may look excessive, but
    works very well in practice. Most bins hold sizes that are
    unusual as malloc request sizes, but are more usual for fragments
    and consolidated sets of chunks, which is what these bins hold, so
    they can be found quickly. All procedures maintain the invariant
    that no consolidated chunk physically borders another one, so each
    chunk in a list is known to be preceded and followed by either
    inuse chunks or the ends of memory.

    Chunks in bins are kept in size order, with ties going to the
    approximately least recently used chunk. Ordering isn't needed
    for the small bins, which all contain the same-sized chunks, but
    facilitates best-fit allocation for larger chunks. These lists
    are just sequential. Keeping them in order almost never requires
    enough traversal to warrant using fancier ordered data
    structures.

    Chunks of the same size are linked with the most
    recently freed at the front, and allocations are taken from the
    back. This results in LRU (FIFO) allocation order, which tends
    to give each chunk an equal opportunity to be consolidated with
    adjacent freed chunks, resulting in larger free chunks and less
    fragmentation.

    To simplify use in double-linked lists, each bin header acts
    as a malloc_chunk. This avoids special-casing for headers.
    But to conserve space and improve locality, we allocate
    only the fd/bk pointers of bins, and then use repositioning tricks
    to treat these as the fields of a malloc_chunk*.
*/

typedef struct malloc_chunk* mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - ((sizeof(size_t))<<1)))

/* analog of ++bin */
#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
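
/*
  Example (illustrative; note that mstate and the bins array are
  declared further below, and the layout check assumes fd immediately
  follows the two size_t fields with no padding, as on typical
  targets): bin i owns bins[2*i] and bins[2*i+1], and bin_at backs up
  by two size_t so those slots line up with the fd/bk fields of a
  pretend malloc_chunk header.
*/
#if 0 /* example only */
static void bin_header_layout(mstate av, int i)
{
    mbinptr b = bin_at(av, i);
    assert(&b->fd == &av->bins[(i << 1)]);      /* fd slot */
    assert(&b->bk == &av->bins[(i << 1) + 1]);  /* bk slot */
}
#endif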

/* Reminders about list directionality within bins */
#define first(b)     ((b)->fd)
#define last(b)      ((b)->bk)

/* Take a chunk off a bin list */
#define unlink(P, BK, FD) {                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  if (FD->bk != P || BK->fd != P)                                      \
      abort();                                                         \
  FD->bk = BK;                                                         \
  BK->fd = FD;                                                         \
}

/*
  Indexing

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically spaced:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    The bins top out around 1MB because we expect to service large
    requests via mmap.
*/

#define NSMALLBINS        32
#define SMALLBIN_WIDTH     8
#define MIN_LARGE_SIZE   256

#define in_smallbin_range(sz)  \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz)     (((unsigned)(sz)) >> 3)

#define bin_index(sz) \
 ((in_smallbin_range(sz)) ? smallbin_index(sz) : __malloc_largebin_index(sz))
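
/*
  Worked example (illustrative): small requests index bins directly by
  size/8, so every chunk size below MIN_LARGE_SIZE gets a bin holding
  exactly one size.
*/
#if 0 /* example only */
static void bin_index_examples(void)
{
    assert(in_smallbin_range(16));
    assert(smallbin_index(16)  == 2);   /* 16-byte chunks -> bin 2 */
    assert(smallbin_index(24)  == 3);   /* 24-byte chunks -> bin 3 */
    assert(smallbin_index(248) == 31);  /* last small bin          */
    assert(!in_smallbin_range(256));    /* 256+ uses large bins    */
}
#endif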

/*
  FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
  first bin that is maintained in sorted order. This must
  be the smallest size corresponding to a given bin.

  Normally, this should be MIN_LARGE_SIZE. But you can weaken
  best fit guarantees to sometimes speed up malloc by increasing value.
  Doing this means that malloc may choose a chunk that is
  non-best-fitting by up to the width of the bin.

  Some useful cutoff values:
      512 - all bins sorted
     2560 - leaves bins <=     64 bytes wide unsorted
    12288 - leaves bins <=    512 bytes wide unsorted
    65536 - leaves bins <=   4096 bytes wide unsorted
   262144 - leaves bins <=  32768 bytes wide unsorted
       -1 - no bins sorted (not recommended!)
*/

#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
/* #define FIRST_SORTED_BIN_SIZE 65536 */

/*
  Unsorted chunks

    All remainders from chunk splits, as well as all returned chunks,
    are first placed in the "unsorted" bin. They are then placed
    in regular bins after malloc gives them ONE chance to be used before
    binning. So, basically, the unsorted_chunks list acts as a queue,
    with chunks being placed on it in free (and __malloc_consolidate),
    and taken off (to be either used or placed in bins) in malloc.
*/

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)          (bin_at(M, 1))

/*
  Top

    The top-most available chunk (i.e., the one bordering the end of
    available memory) is treated specially. It is never included in
    any bin, is used only if no other chunk is available, and is
    released back to the system if it is very large (see
    M_TRIM_THRESHOLD). Because top initially
    points to its own bin with initial zero size, thus forcing
    extension on the first malloc request, we avoid having any special
    code in malloc to check whether it even exists yet. But we still
    need to do so when getting memory from system, so we make
    initial_top treat the bin as a legal but unusable chunk during the
    interval between initialization and the first call to
    __malloc_alloc. (This is somewhat delicate, since it relies on
    the 2 preceding words to be zero during this interval as well.)
*/

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)              (unsorted_chunks(M))

/*
  Binmap

    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching. `binmap' is a
    bitvector recording whether bins are definitely empty so they can
    be skipped over during traversals. The bits are NOT always
    cleared as soon as bins are empty, but instead only
    when they are noticed to be empty during traversal in malloc.
*/

/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
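
/*
  Example (illustrative sketch; `find_next_marked' is a hypothetical
  helper, and mstate is declared further below): how a search can use
  the binmap to skip definitely-empty bins, first a 32-bin word at a
  time, then bit by bit. Because bits are cleared lazily, a marked bin
  must still be checked for emptiness.
*/
#if 0 /* example only */
static int find_next_marked(mstate av, int i)
{
    unsigned int block = idx2block(i);
    unsigned int map   = av->binmap[block] & ~(idx2bit(i) - 1); /* bins >= i */

    while (map == 0) {                  /* whole word empty: skip 32 bins */
        if (++block >= BINMAPSIZE)
            return -1;                  /* no marked bin remains */
        map = av->binmap[block];
    }
    i = block << BINMAPSHIFT;
    while (!(map & 1)) {                /* locate lowest set bit */
        map >>= 1;
        ++i;
    }
    return i;                           /* candidate bin; may still be empty */
}
#endif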

/*
  Fastbins

    An array of lists holding recently freed small chunks. Fastbins
    are not doubly linked. It is faster to single-link them, and
    since chunks are never removed from the middles of these lists,
    double linking is not necessary. Also, unlike regular bins, they
    are not even processed in FIFO order (they use faster LIFO) since
    ordering doesn't much matter in the transient contexts in which
    fastbins are normally used.

    Chunks in fastbins keep their inuse bit set, so they cannot
    be consolidated with other free chunks. __malloc_consolidate
    releases all chunks in fastbins and consolidates them with
    other free chunks.
*/

typedef struct malloc_chunk* mfastbinptr;

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)

/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE     80

#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
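
/*
  Worked example (illustrative, assumes a 32-bit target): fastbin_index
  takes chunk sizes, not request sizes, and shifts down by 2 because
  chunks smaller than 16 bytes cannot exist, so sizes 16, 24, 32, ...
  land in slots 0, 1, 2, ...
*/
#if 0 /* example only */
static void fastbin_index_examples(void)
{
    assert(fastbin_index(16) == 0);   /* smallest possible chunk */
    assert(fastbin_index(24) == 1);
    assert(fastbin_index(32) == 2);
    /* NFASTBINS sizes the array for the largest supported request:
       request2size(80) == 88 here, so 11 - 2 + 1 == 10 slots. */
    assert(NFASTBINS == 10);
}
#endif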

/*
  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  that triggers automatic consolidation of possibly-surrounding
  fastbin chunks. This is a heuristic, so the exact value should not
  matter too much. It is defined at half the default trim threshold as a
  compromise heuristic to only attempt consolidation if it is likely
  to lead to trimming. However, it is not dynamically tunable, since
  consolidation reduces fragmentation surrounding large chunks even
  if trimming is not used.
*/
#define FASTBIN_CONSOLIDATION_THRESHOLD  \
  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)

/*
  Since the lowest 2 bits in max_fast don't matter in size comparisons,
  they are used as flags.
*/

/*
  ANYCHUNKS_BIT held in max_fast indicates that there may be any
  freed chunks at all. It is set true when entering a chunk into any
  bin.
*/
#define ANYCHUNKS_BIT        (1U)

#define have_anychunks(M)    (((M)->max_fast &  ANYCHUNKS_BIT))
#define set_anychunks(M)     ((M)->max_fast |=  ANYCHUNKS_BIT)
#define clear_anychunks(M)   ((M)->max_fast &= ~ANYCHUNKS_BIT)

/*
  FASTCHUNKS_BIT held in max_fast indicates that there are probably
  some fastbin chunks. It is set true on entering a chunk into any
  fastbin, and cleared only in __malloc_consolidate.
*/
#define FASTCHUNKS_BIT       (2U)

#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))

/* Set value of max_fast. Use impossibly small value if 0. */
#define set_max_fast(M, s) \
  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))

#define get_max_fast(M) \
  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
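
/*
  Example (illustrative; mstate is declared further below): because
  request2size always returns a multiple of the alignment, the two low
  bits of max_fast are free to carry the status flags, and
  get_max_fast recovers the threshold by masking them back off.
*/
#if 0 /* example only */
static void max_fast_packing(mstate av)
{
    set_max_fast(av, DEFAULT_MXFAST);  /* threshold with low bits clear */
    set_fastchunks(av);                /* sets FASTCHUNKS and ANYCHUNKS */
    assert(get_max_fast(av) == request2size(DEFAULT_MXFAST));
    assert(have_fastchunks(av) && have_anychunks(av));
}
#endif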

/*
  morecore_properties is a status word holding dynamically discovered
  or controlled properties of the morecore function.
*/
#define MORECORE_CONTIGUOUS_BIT  (1U)

#define contiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
#define noncontiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
#define set_contiguous(M) \
        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
#define set_noncontiguous(M) \
        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)

/*
   ----------- Internal state representation and initialization -----------
*/

struct malloc_state {

  /* The maximum chunk size to be eligible for fastbin */
  size_t           max_fast;   /* low 2 bits used as flags */

  /* Fastbins */
  mfastbinptr      fastbins[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr        top;

  /* The remainder from the most recent split of a small request */
  mchunkptr        last_remainder;

  /* Normal bins packed as described above */
  mchunkptr        bins[NBINS * 2];

  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
  unsigned int     binmap[BINMAPSIZE+1];

  /* Tunable parameters */
  unsigned long    trim_threshold;
  size_t           top_pad;
  size_t           mmap_threshold;

  /* Memory map support */
  int              n_mmaps;
  int              n_mmaps_max;
  int              max_n_mmaps;

  /* Cache malloc_getpagesize */
  unsigned int     pagesize;

  /* Track properties of MORECORE */
  unsigned int     morecore_properties;

  /* Statistics */
  size_t           mmapped_mem;
  size_t           sbrked_mem;
  size_t           max_sbrked_mem;
  size_t           max_mmapped_mem;
  size_t           max_total_mem;
};

typedef struct malloc_state *mstate;

/*
   There is exactly one instance of this struct in this malloc.
   If you are adapting this malloc in a way that does NOT use a static
   malloc_state, you MUST explicitly zero-fill it before using. This
   malloc relies on the property that malloc_state is initialized to
   all zeroes (as is true of C statics).
*/
extern struct malloc_state __malloc_state;  /* never directly referenced */

/*
   All uses of av_ are via get_malloc_state().
   At most one "call" to get_malloc_state is made per invocation of
   the public versions of malloc and free, but other routines
   that in turn invoke malloc and/or free may call more than once.
   Also, it is called in check* routines if __UCLIBC_MALLOC_DEBUGGING__ is set.
*/
#define get_malloc_state() (&(__malloc_state))

/* External internal utilities operating on mstates */
void __malloc_consolidate(mstate) attribute_hidden;

/* Debugging support */
#ifndef __UCLIBC_MALLOC_DEBUGGING__

#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)
#define check_malloc_state()
#define assert(x) ((void)0)

#else

#define check_chunk(P)              __do_check_chunk(P)
#define check_free_chunk(P)         __do_check_free_chunk(P)
#define check_inuse_chunk(P)        __do_check_inuse_chunk(P)
#define check_remalloced_chunk(P,N) __do_check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)   __do_check_malloced_chunk(P,N)
#define check_malloc_state()        __do_check_malloc_state()

extern void __do_check_chunk(mchunkptr p);
extern void __do_check_free_chunk(mchunkptr p);
extern void __do_check_inuse_chunk(mchunkptr p);
extern void __do_check_remalloced_chunk(mchunkptr p, size_t s);
extern void __do_check_malloced_chunk(mchunkptr p, size_t s);
extern void __do_check_malloc_state(void);

#endif