This is a version (aka dlmalloc) of malloc/free/realloc written by
Doug Lea and released to the public domain. Use, modify, and
redistribute this code without permission or acknowledgement in any
way you wish. Send questions, comments, complaints, performance
data, etc to dl@cs.oswego.edu

VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

Note: There may be an updated version of this malloc obtainable at
ftp://gee.cs.oswego.edu/pub/misc/malloc.c
Check before installing!

Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
#include <bits/uClibc_mutex.h>

__UCLIBC_MUTEX_EXTERN(__malloc_lock)
#if defined __UCLIBC_HAS_THREADS__ && !defined __LINUXTHREADS_OLD__
	attribute_hidden
#endif
	;
#define __MALLOC_LOCK    __UCLIBC_MUTEX_LOCK(__malloc_lock)
#define __MALLOC_UNLOCK  __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
It must be a power of two, at least 2 * (sizeof(size_t)), even on machines
for which smaller alignments would suffice. It may be defined as
larger than this though. Note however that code and data structures
are optimized for the case of 8-byte alignment.

#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT       (2 * (sizeof(size_t)))
#endif

/* The corresponding bit mask value */
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
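A quick illustrative sketch (not part of this file): with 4-byte
size_t, MALLOC_ALIGNMENT is 8 and MALLOC_ALIGN_MASK is 7, so a size
is rounded up to alignment by adding the mask and clearing the low
bits:

    size_t rounded = (sz + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
    /* e.g. sz == 13  ->  rounded == (13 + 7) & ~7 == 16 */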
TRIM_FASTBINS controls whether free() of a very small chunk can
immediately lead to trimming. Setting to true (1) can reduce memory
footprint, but will almost always slow down programs that use a lot
of small chunks.

Define this only if you are willing to give up some speed to more
aggressively reduce system-level memory footprint when releasing
memory in programs that use many small chunks. You can get
essentially the same effect by setting MXFAST to 0, but this can
lead to even greater slowdowns in programs using many small chunks.
TRIM_FASTBINS is an in-between compile-time option that only
prevents chunks bordering topmost memory from being placed in
fastbins.

#define TRIM_FASTBINS  0
MORECORE-related declarations. By default, rely on sbrk.

MORECORE is the name of the routine to call to obtain more memory
from the system. See below for general guidance on writing
alternative MORECORE functions, as well as a version for WIN32 and a
sample version for pre-OSX macos.

MORECORE_FAILURE is the value returned upon failure of MORECORE
as well as mmap. Since it cannot be an otherwise valid memory address,
and must reflect values of standard sys calls, you probably ought not
try to redefine it.

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif
If MORECORE_CONTIGUOUS is true, take advantage of the fact that
consecutive calls to MORECORE with positive arguments always return
contiguous increasing addresses. This is true of unix sbrk. Even
if not defined, when regions happen to be contiguous, malloc will
permit allocations spanning regions obtained from different
calls. But defining this when applicable enables some stronger
consistency checks and space efficiencies.

#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif
MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
sbrk fails, and mmap is used as a backup (which is done only if
HAVE_MMAP). The value must be a multiple of page size. This
backup strategy generally applies only when systems have "holes" in
address space, so sbrk cannot perform contiguous expansion, but
there is still space available on system. On systems for which
this is known to be useful (i.e. most linux kernels), this occurs
only when programs allocate huge amounts of memory. Between this,
and the fact that mmap regions tend to be limited, the size should
be large, to avoid too many mmap calls and thus avoid running out
of kernel resources.

#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif
The system page size. To the extent possible, this malloc manages
memory from the system in page-size units. Note that this value is
cached during initialization into a field of malloc_state. So even
if malloc_getpagesize is a function, it is only called once.

The following mechanics for getpagesize were adapted from bsd/gnu
getpagesize.h. If none of the system-probes here apply, a value of
4096 is used, which should be OK: If they don't apply, then using
the actual value probably doesn't impact performance.

#ifndef malloc_getpagesize
# include <unistd.h>
# define malloc_getpagesize sysconf(_SC_PAGESIZE)
#else /* just guess */
# define malloc_getpagesize (4096)
#endif
/* mallopt tuning options */

M_MXFAST is the maximum request size used for "fastbins", special bins
that hold returned chunks without consolidating their spaces. This
enables future requests for chunks of the same size to be handled
very quickly, but can increase fragmentation, and thus increase the
overall memory footprint of a program.

This malloc manages fastbins very conservatively yet still
efficiently, so fragmentation is rarely a problem for values less
than or equal to the default. The maximum supported value of MXFAST
is 80. You wouldn't want it any higher than this anyway. Fastbins
are designed especially for use with many small structs, objects or
strings -- the default handles structs/objects/arrays with sizes up
to 16 4-byte fields, or small strings representing words, tokens,
etc. Using fastbins for larger objects normally worsens
fragmentation without improving speed.

M_MXFAST is set in REQUEST size units. It is internally used in
chunksize units, which adds padding and alignment. You can reduce
M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
algorithm to be a closer approximation of fifo-best-fit in all cases,
not just for larger requests, but will generally cause it to be
slower.

/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
#ifndef M_MXFAST
#define M_MXFAST            1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST     64
#endif
M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
to keep before releasing via malloc_trim in free().

Automatic trimming is mainly useful in long-lived programs.
Because trimming via sbrk can be slow on some systems, and can
sometimes be wasteful (in cases where programs immediately
afterward allocate more large chunks) the value should be high
enough so that your overall system performance would improve by
releasing this much memory.

The trim threshold and the mmap control parameters (see below)
can be traded off with one another. Trimming and mmapping are
two different ways of releasing unused memory back to the
system. Between these two, it is often possible to keep
system-level demands of a long-lived program down to a bare
minimum. For example, in one test suite of sessions measuring
the XF86 X server on Linux, using a trim threshold of 128K and a
mmap threshold of 192K led to near-minimal long term resource
consumption.

If you are using this malloc in a long-lived program, it should
pay to experiment with these values. As a rough guide, you
might set to a value close to the average size of a process
(program) running on your system. Releasing this much memory
would allow such a process to run in memory. Generally, it's
worth it to tune for trimming rather than memory mapping when a
program undergoes phases where several large chunks are
allocated and released in ways that can reuse each other's
storage, perhaps mixed with phases where there are no such
chunks at all. And in well-behaved long-lived programs,
controlling release of large blocks via trimming versus mapping
is usually faster.

However, in most programs, these parameters serve mainly as
protection against the system-level effects of carrying around
massive amounts of unneeded memory. Since frequent calls to
sbrk, mmap, and munmap otherwise degrade performance, the default
parameters are set to relatively high values that serve only as
safeguards.

The trim value must be greater than page size to have any useful
effect. To disable trimming completely, you can set to
(unsigned long)(-1).

Trim settings interact with fastbin (MXFAST) settings: Unless
TRIM_FASTBINS is defined, automatic trimming never takes place upon
freeing a chunk with size less than or equal to MXFAST. Trimming is
instead delayed until subsequent freeing of larger chunks. However,
you can still force an attempted trim by calling malloc_trim.

Also, trimming is not generally possible in cases where
the main arena is obtained via mmap.

Note that the trick some people use of mallocing a huge space and
then freeing it at program startup, in an attempt to reserve system
memory, doesn't have the intended effect under automatic trimming,
since that memory will immediately be returned to the system.

#define M_TRIM_THRESHOLD    -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (256 * 1024)
#endif
M_TOP_PAD is the amount of extra `padding' space to allocate or
retain whenever sbrk is called. It is used in two ways internally:

* When sbrk is called to extend the top of the arena to satisfy
  a new malloc request, this much padding is added to the sbrk
  request.

* When malloc_trim is called automatically from free(),
  it is used as the `pad' argument.

In both cases, the actual amount of padding is rounded
so that the end of the arena is always a system page boundary.

The main reason for using padding is to avoid calling sbrk so
often. Having even a small pad greatly reduces the likelihood
that nearly every malloc request during program start-up (or
after trimming) will invoke sbrk, which needlessly wastes
time.

Automatic rounding-up to page-size units is normally sufficient
to avoid measurable overhead, so the default is 0. However, in
systems where sbrk is relatively slow, it can pay to increase
this value, at the expense of carrying around more memory than
the program needs.

#define M_TOP_PAD           -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif
M_MMAP_THRESHOLD is the request size threshold for using mmap()
to service a request. Requests of at least this size that cannot
be allocated using already-existing space will be serviced via mmap.
(If enough normal freed space already exists it is used instead.)

Using mmap segregates relatively large chunks of memory so that
they can be individually obtained and released from the host
system. A request serviced through mmap is never reused by any
other request (at least not directly; the system may just so
happen to remap successive requests to the same locations).

Segregating space in this way has the benefits that:

1. Mmapped space can ALWAYS be individually released back
   to the system, which helps keep the system level memory
   demands of a long-lived program low.
2. Mapped memory can never become `locked' between
   other chunks, as can happen with normally allocated chunks, which
   means that even trimming via malloc_trim would not release them.
3. On some systems with "holes" in address spaces, mmap can obtain
   memory that sbrk cannot.

However, it has the disadvantages that:

1. The space cannot be reclaimed, consolidated, and then
   used to service later requests, as happens with normal chunks.
2. It can lead to more wastage because of mmap page alignment
   requirements.
3. It causes malloc performance to be more dependent on host
   system memory management support routines which may vary in
   implementation quality and may impose arbitrary
   limitations. Generally, servicing a request via normal
   malloc steps is faster than going through a system's mmap.

The advantages of mmap nearly always outweigh disadvantages for
"large" chunks, but the value of "large" varies across systems. The
default is an empirically derived value that works well in most
systems.

#define M_MMAP_THRESHOLD    -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (256 * 1024)
#endif
M_MMAP_MAX is the maximum number of requests to simultaneously
service using mmap. This parameter exists because:

. Some systems have a limited number of internal tables for
  use by mmap, and using more than a few of them may degrade
  performance.

The default is set to a value that serves only as a safeguard.
Setting to 0 disables use of mmap for servicing large requests. If
HAVE_MMAP is not set, the default value is 0, and attempts to set it
to non-zero values in mallopt will fail.

#define M_MMAP_MAX          -4

#ifndef DEFAULT_MMAP_MAX
#define DEFAULT_MMAP_MAX       (65536)
#endif
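The M_* constants defined above are the parameter names accepted by
mallopt(). As an illustrative sketch (values are examples, not
recommendations), a long-lived application could apply the XF86
settings mentioned earlier from its own startup code:

    #include <malloc.h>

    mallopt(M_TRIM_THRESHOLD, 128 * 1024); /* trim when >128K unused at top   */
    mallopt(M_MMAP_THRESHOLD, 192 * 1024); /* mmap requests of 192K and up    */
    mallopt(M_MMAP_MAX, 1024);             /* cap simultaneous mmapped chunks */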
/* ------------------ MMAP support ------------------ */
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __ARCH_USE_MMU__
# define _MAP_UNINITIALIZED 0
#else
# define _MAP_UNINITIALIZED MAP_UNINITIALIZED
#endif

#define MMAP(addr, size, prot) \
 (mmap((addr), (size), (prot), MAP_PRIVATE|MAP_ANONYMOUS|_MAP_UNINITIALIZED, 0, 0))
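A minimal usage sketch (illustrative only): mapping one anonymous
read-write page. On failure mmap returns (void*)-1, which is why
MORECORE_FAILURE doubles as the failure test for both sbrk and mmap:

    char* mm = (char*)(MMAP(0, 4096, PROT_READ|PROT_WRITE));
    if (mm != (char*)(MORECORE_FAILURE)) {
        /* got a zero-filled page at mm */
    }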
/* ----------------------- Chunk representations ----------------------- */

This struct declaration is misleading (but accurate and necessary).
It declares a "view" into memory allowing access to necessary
fields at known offsets from a given base. See explanation below.

struct malloc_chunk {
  size_t               prev_size;  /* Size of previous chunk (if free). */
  size_t               size;       /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;

malloc_chunk details:

(The following includes lightly edited explanations by Colin Plumb.)
Chunks of memory are maintained using a `boundary tag' method as
described in e.g., Knuth or Standish. (See the paper by Paul
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
survey of such techniques.) Sizes of free chunks are stored both
in the front of each chunk and at the end. This makes
consolidating fragmented chunks into bigger chunks very fast. The
size fields also hold bits representing whether chunks are free or
in use.

An allocated chunk looks like this:
    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Where "chunk" is the front of the chunk for the purpose of most of
the malloc code, but "mem" is the pointer that is returned to the
user. "Nextchunk" is the beginning of the next contiguous chunk.

Chunks always begin on even word boundaries, so the mem portion
(which is returned to the user) is also on an even word boundary, and
thus at least double-word aligned.
Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
chunk size (which is always a multiple of two words), is an in-use
bit for the *previous* chunk. If that bit is *clear*, then the
word before the current chunk size contains the previous chunk
size, and can be used to find the front of the previous chunk.
The very first chunk allocated always has this bit set,
preventing access to non-existent (or non-owned) memory. If
prev_inuse is set for any given chunk, then you CANNOT determine
the size of the previous chunk, and might even get a memory
addressing fault when trying to do so.

Note that the `foot' of the current chunk is actually represented
as the prev_size of the NEXT chunk. This makes it easier to
deal with alignments etc but can be very confusing when trying
to extend or adapt this code.

The two exceptions to all this are:

1. The special chunk `top' doesn't bother using the
   trailing size field since there is no next contiguous chunk
   that would have to index off it. After initialization, `top'
   is forced to always exist. If it would become less than
   MINSIZE bytes long, it is replenished.

2. Chunks allocated via mmap, which have the second-lowest-order
   bit (IS_MMAPPED) set in their size fields. Because they are
   allocated one-by-one, each must contain its own trailing size field.
---------- Size and alignment checks and conversions ----------

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((void*)((char*)(p) + 2*(sizeof(size_t))))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*(sizeof(size_t))))
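Illustratively, the two macros are exact inverses; the offset of
2*sizeof(size_t) skips the prev_size and size header words. Given a
pointer mem previously handed to a user:

    mchunkptr p = mem2chunk(mem);   /* step back to the chunk header */
    /* chunk2mem(p) == mem again */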
/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (sizeof(struct malloc_chunk))

/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */
#define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
/* Check if a request is so large that it would wrap around zero when
   padded and aligned. To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
*/

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long)(req) >=                                        \
   (unsigned long)(size_t)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK < MINSIZE)  ?    \
   MINSIZE :                                                      \
   ((req) + (sizeof(size_t)) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
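A worked example of the arithmetic (assuming 4-byte size_t and
8-byte alignment, so MINSIZE is 16): a 21-byte request needs 21 + 4
bytes including the size word, rounded up: (21 + 4 + 7) & ~7 == 32.
A 1-byte request falls below MINSIZE and is padded up to 16:

    size_t sz   = request2size(21);  /* 32 */
    size_t tiny = request2size(1);   /* 16 */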
/* Same, except also perform argument check */

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE(req)) {                                \
    errno = ENOMEM;                                               \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);
--------------- Physical chunk operations ---------------

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->size & PREV_INUSE)

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* Bits to mask off when extracting size

   Note: IS_MMAPPED is intentionally not masked off from size field in
   macros for which mmapped chunks should never be seen. This should
   cause helpful core dumps to occur if it is tried by accident by
   people extending or adapting this malloc.
*/
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Get size, ignoring use bits */
#define chunksize(p)        ((p)->size & ~(SIZE_BITS))

/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/* extract p's inuse bit */
#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s)  ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use field */
#define set_head(p, s)       ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
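An illustrative sketch (hypothetical code, not part of this file)
tying these macros together, starting from a user pointer mem:

    mchunkptr p  = mem2chunk(mem);   /* header of the allocated block     */
    size_t    sz = chunksize(p);     /* size with the flag bits masked    */
    mchunkptr nx = next_chunk(p);    /* physically adjacent chunk         */
    int       in = inuse(p);         /* p's inuse bit lives in nx's size  */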
/* -------------------- Internal data structures -------------------- */

Bins

An array of bin headers for free chunks. Each bin is doubly
linked. The bins are approximately proportionally (log) spaced.
There are a lot of these bins (128). This may look excessive, but
works very well in practice. Most bins hold sizes that are
unusual as malloc request sizes, but are more usual for fragments
and consolidated sets of chunks, which is what these bins hold, so
they can be found quickly. All procedures maintain the invariant
that no consolidated chunk physically borders another one, so each
chunk in a list is known to be preceded and followed by either
inuse chunks or the ends of memory.

Chunks in bins are kept in size order, with ties going to the
approximately least recently used chunk. Ordering isn't needed
for the small bins, which all contain the same-sized chunks, but
facilitates best-fit allocation for larger chunks. These lists
are just sequential. Keeping them in order almost never requires
enough traversal to warrant using fancier ordered data
structures.

Chunks of the same size are linked with the most
recently freed at the front, and allocations are taken from the
back. This results in LRU (FIFO) allocation order, which tends
to give each chunk an equal opportunity to be consolidated with
adjacent freed chunks, resulting in larger free chunks and less
fragmentation.

To simplify use in double-linked lists, each bin header acts
as a malloc_chunk. This avoids special-casing for headers.
But to conserve space and improve locality, we allocate
only the fd/bk pointers of bins, and then use repositioning tricks
to treat these as the fields of a malloc_chunk*.
typedef struct malloc_chunk* mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - ((sizeof(size_t))<<1)))

/* analog of ++bin */
#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

/* Reminders about list directionality within bins */
#define first(b)     ((b)->fd)
#define last(b)      ((b)->bk)
/* Take a chunk off a bin list */
#define unlink(P, BK, FD) {                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  if (FD->bk != P || BK->fd != P)                                      \
      abort();                                                         \
  FD->bk = BK;                                                         \
  BK->fd = FD;                                                         \
}
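A usage sketch (hypothetical, but mirroring how the allocator pulls
a chunk out of a bin; "bin" here stands for some mbinptr): BK and FD
are plain mchunkptr variables that the macro fills in while splicing
the victim out of its circular list:

    mchunkptr victim = last(bin);    /* least recently used chunk  */
    mchunkptr bck, fwd;
    unlink(victim, bck, fwd);        /* victim is now off the list */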
Bins for sizes < 512 bytes contain chunks of all the same size, spaced
8 bytes apart. Larger bins are approximately logarithmically spaced:

64 bins of size       8
32 bins of size      64
16 bins of size     512
 8 bins of size    4096
 4 bins of size   32768
 2 bins of size  262144
 1 bin  of size what's left

The bins top out around 1MB because we expect to service large
requests via mmap.
#define NBINS             96
#define NSMALLBINS        32
#define SMALLBIN_WIDTH     8
#define MIN_LARGE_SIZE   256

#define in_smallbin_range(sz) \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz)    (((unsigned)(sz)) >> 3)

#define bin_index(sz) \
  ((in_smallbin_range(sz)) ? smallbin_index(sz) : __malloc_largebin_index(sz))
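Illustrative arithmetic (using the definitions above): a 48-byte
chunk is in smallbin range, so bin_index(48) is 48 >> 3 == 6, an
exact-size bin; a 2000-byte chunk falls through to
__malloc_largebin_index, which picks one of the log-spaced bins:

    unsigned small_idx = bin_index(48);    /* == 6               */
    unsigned large_idx = bin_index(2000);  /* a sorted large bin */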
FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
first bin that is maintained in sorted order. This must
be the smallest size corresponding to a given bin.

Normally, this should be MIN_LARGE_SIZE. But you can weaken
best fit guarantees to sometimes speed up malloc by increasing the
value. Doing this means that malloc may choose a chunk that is
non-best-fitting by up to the width of the bin.

Some useful cutoff values:
    512 - all bins sorted
   2560 - leaves bins <=    64 bytes wide unsorted
  12288 - leaves bins <=   512 bytes wide unsorted
  65536 - leaves bins <=  4096 bytes wide unsorted
 262144 - leaves bins <= 32768 bytes wide unsorted
     -1 - no bins sorted (not recommended!)

#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE
/* #define FIRST_SORTED_BIN_SIZE 65536 */
Unsorted chunks

All remainders from chunk splits, as well as all returned chunks,
are first placed in the "unsorted" bin. They are then placed
in regular bins after malloc gives them ONE chance to be used before
binning. So, basically, the unsorted_chunks list acts as a queue,
with chunks being placed on it in free (and __malloc_consolidate),
and taken off (to be either used or placed in bins) in malloc.

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)       (bin_at(M, 1))
Top

The top-most available chunk (i.e., the one bordering the end of
available memory) is treated specially. It is never included in
any bin, is used only if no other chunk is available, and is
released back to the system if it is very large (see
M_TRIM_THRESHOLD). Because top initially
points to its own bin with initial zero size, thus forcing
extension on the first malloc request, we avoid having any special
code in malloc to check whether it even exists yet. But we still
need to do so when getting memory from system, so we make
initial_top treat the bin as a legal but unusable chunk during the
interval between initialization and the first call to
__malloc_alloc. (This is somewhat delicate, since it relies on
the 2 preceding words to be zero during this interval as well.)

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)           (unsorted_chunks(M))
Binmap

To help compensate for the large number of bins, a one-level index
structure is used for bin-by-bin searching. `binmap' is a
bitvector recording whether bins are definitely empty so they can
be skipped over during traversals. The bits are NOT always
cleared as soon as bins are empty, but instead only
when they are noticed to be empty during traversal in malloc.

/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
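Illustrative arithmetic: bin index 70 lives in map word 70 >> 5 == 2
at bit position 70 & 31 == 6, so its status is the single bit
(1U << 6) of binmap[2]:

    mstate av = get_malloc_state();   /* the single malloc_state instance */
    mark_bin(av, 70);                 /* binmap[2] |= (1U << 6)           */
    if (get_binmap(av, 70)) {
        /* bin 70 may be non-empty */
    }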
Fastbins

An array of lists holding recently freed small chunks. Fastbins
are not doubly linked. It is faster to single-link them, and
since chunks are never removed from the middles of these lists,
double linking is not necessary. Also, unlike regular bins, they
are not even processed in FIFO order (they use faster LIFO) since
ordering doesn't much matter in the transient contexts in which
fastbins are normally used.

Chunks in fastbins keep their inuse bit set, so they cannot
be consolidated with other free chunks. __malloc_consolidate
releases all chunks in fastbins and consolidates them with
other free chunks.

typedef struct malloc_chunk* mfastbinptr;

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz)        ((((unsigned int)(sz)) >> 3) - 2)

/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE     80

#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
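Illustrative arithmetic (assuming 4-byte size_t): the smallest real
chunk is 16 bytes, indexing to (16 >> 3) - 2 == 0, and
request2size(MAX_FAST_SIZE) is 88, so NFASTBINS works out to
(88 >> 3) - 2 + 1 == 10:

    unsigned idx = fastbin_index(24);   /* (24 >> 3) - 2 == 1 */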
FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
that triggers automatic consolidation of possibly-surrounding
fastbin chunks. This is a heuristic, so the exact value should not
matter too much. It is defined at half the default trim threshold as a
compromise heuristic to only attempt consolidation if it is likely
to lead to trimming. However, it is not dynamically tunable, since
consolidation reduces fragmentation surrounding large chunks even
if trimming is not used.

#define FASTBIN_CONSOLIDATION_THRESHOLD \
  ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
Since the lowest 2 bits in max_fast don't matter in size comparisons,
they are used as flags.

ANYCHUNKS_BIT held in max_fast indicates that there may be any
freed chunks at all. It is set true when entering a chunk into any
bin.

#define ANYCHUNKS_BIT        (1U)

#define have_anychunks(M)    (((M)->max_fast &  ANYCHUNKS_BIT))
#define set_anychunks(M)     ((M)->max_fast |=  ANYCHUNKS_BIT)
#define clear_anychunks(M)   ((M)->max_fast &= ~ANYCHUNKS_BIT)

FASTCHUNKS_BIT held in max_fast indicates that there are probably
some fastbin chunks. It is set true on entering a chunk into any
fastbin, and cleared only in __malloc_consolidate.

#define FASTCHUNKS_BIT       (2U)

#define have_fastchunks(M)   (((M)->max_fast &  FASTCHUNKS_BIT))
#define set_fastchunks(M)    ((M)->max_fast |=  (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
#define clear_fastchunks(M)  ((M)->max_fast &= ~(FASTCHUNKS_BIT))

/* Set value of max_fast. Use impossibly small value if 0. */
#define set_max_fast(M, s) \
  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))

#define get_max_fast(M) \
  ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
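Because chunk sizes are always multiples of 8, the two low bits of
max_fast are free to carry these flags; get_max_fast strips them off
again before any size comparison. An illustrative sketch:

    mstate av = get_malloc_state();
    set_max_fast(av, DEFAULT_MXFAST);   /* size | preserved flag bits */
    if ((unsigned long)sz <= (unsigned long)get_max_fast(av)) {
        /* sz is eligible for a fastbin */
    }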
morecore_properties is a status word holding dynamically discovered
or controlled properties of the morecore function.

#define MORECORE_CONTIGUOUS_BIT  (1U)

#define contiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT))
#define noncontiguous(M) \
        (((M)->morecore_properties &  MORECORE_CONTIGUOUS_BIT) == 0)
#define set_contiguous(M) \
        ((M)->morecore_properties |=  MORECORE_CONTIGUOUS_BIT)
#define set_noncontiguous(M) \
        ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
----------- Internal state representation and initialization -----------

struct malloc_state {

  /* The maximum chunk size to be eligible for fastbin */
  size_t         max_fast;   /* low 2 bits used as flags */

  /* Fastbins */
  mfastbinptr    fastbins[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr      top;

  /* The remainder from the most recent split of a small request */
  mchunkptr      last_remainder;

  /* Normal bins packed as described above */
  mchunkptr      bins[NBINS * 2];

  /* Bitmap of bins. Trailing zero map handles cases of largest binned size */
  unsigned int   binmap[BINMAPSIZE+1];

  /* Tunable parameters */
  unsigned long  trim_threshold;
  size_t         top_pad;
  size_t         mmap_threshold;

  /* Memory map support */
  int            n_mmaps;
  int            n_mmaps_max;
  int            max_n_mmaps;

  /* Cache malloc_getpagesize */
  unsigned int   pagesize;

  /* Track properties of MORECORE */
  unsigned int   morecore_properties;

  /* Statistics */
  size_t         mmapped_mem;
  size_t         sbrked_mem;
  size_t         max_sbrked_mem;
  size_t         max_mmapped_mem;
  size_t         max_total_mem;
};

typedef struct malloc_state *mstate;
There is exactly one instance of this struct in this malloc.
If you are adapting this malloc in a way that does NOT use a static
malloc_state, you MUST explicitly zero-fill it before using. This
malloc relies on the property that malloc_state is initialized to
all zeroes (as is true of C statics).

extern struct malloc_state __malloc_state attribute_hidden;  /* never directly referenced */

All uses of av_ are via get_malloc_state().
At most one "call" to get_malloc_state is made per invocation of
the public versions of malloc and free, but other routines
that in turn invoke malloc and/or free may call it more than once.
Also, it is called in check* routines if __UCLIBC_MALLOC_DEBUGGING__ is set.

#define get_malloc_state() (&(__malloc_state))
/* External internal utilities operating on mstates */
void __malloc_consolidate(mstate) attribute_hidden;
/* Debugging support */
#ifndef __UCLIBC_MALLOC_DEBUGGING__

#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)
#define check_malloc_state()
#define assert(x) ((void)0)

#else

#define check_chunk(P)              __do_check_chunk(P)
#define check_free_chunk(P)         __do_check_free_chunk(P)
#define check_inuse_chunk(P)        __do_check_inuse_chunk(P)
#define check_remalloced_chunk(P,N) __do_check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)   __do_check_malloced_chunk(P,N)
#define check_malloc_state()        __do_check_malloc_state()

extern void __do_check_chunk(mchunkptr p) attribute_hidden;
extern void __do_check_free_chunk(mchunkptr p) attribute_hidden;
extern void __do_check_inuse_chunk(mchunkptr p) attribute_hidden;
extern void __do_check_remalloced_chunk(mchunkptr p, size_t s) attribute_hidden;
extern void __do_check_malloced_chunk(mchunkptr p, size_t s) attribute_hidden;
extern void __do_check_malloc_state(void) attribute_hidden;

#endif