/*--------------------------------------------------------------------*/
/*--- malloc/free wrappers for detecting errors and updating bits. ---*/
/*---                                         mc_malloc_wrappers.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2010 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h
#include "pub_tool_stacktrace.h"    // For VG_(get_and_pp_StackTrace)

#include "mc_include.h"

/*------------------------------------------------------------*/
/*--- Defns                                                ---*/
/*------------------------------------------------------------*/

/* Stats ... */
static SizeT cmalloc_n_mallocs  = 0;
static SizeT cmalloc_n_frees    = 0;
static ULong cmalloc_bs_mallocd = 0;

/* For debug printing to do with mempools: what stack trace
   depth to show. */
#define MEMPOOL_DEBUG_STACKTRACE_DEPTH 16

/*------------------------------------------------------------*/
/*--- Tracking malloc'd and free'd blocks                  ---*/
/*------------------------------------------------------------*/

/* Record malloc'd blocks. */
VgHashTable MC_(malloc_list) = NULL;

/* Memory pools: a hash table of MC_Mempools.  Search key is
   MC_Mempool::pool. */
VgHashTable MC_(mempool_list) = NULL;

/* Records blocks after freeing. */
static MC_Chunk* freed_list_start = NULL;
static MC_Chunk* freed_list_end   = NULL;

/* Put a shadow chunk on the freed blocks queue, possibly freeing up
   some of the oldest blocks in the queue at the same time. */
static void add_to_freed_queue ( MC_Chunk* mc )
{
   const Bool show = False;

   /* Put it at the end of the freed list */
   if (freed_list_end == NULL) {
      tl_assert(freed_list_start == NULL);
      freed_list_end = freed_list_start = mc;
      VG_(free_queue_volume) = (Long)mc->szB;
   } else {
      tl_assert(freed_list_end->next == NULL);
      freed_list_end->next = mc;
      freed_list_end       = mc;
      VG_(free_queue_volume) += (Long)mc->szB;
      if (show)
         VG_(printf)("mc_freelist: acquire: volume now %lld\n",
                     VG_(free_queue_volume));
   }
   VG_(free_queue_length)++;
   mc->next = NULL;

   /* Release enough of the oldest blocks to bring the free queue
      volume below vg_clo_freelist_vol. */

   while (VG_(free_queue_volume) > MC_(clo_freelist_vol)) {
      MC_Chunk* mc1;

      tl_assert(freed_list_start != NULL);
      tl_assert(freed_list_end != NULL);

      mc1 = freed_list_start;
      VG_(free_queue_volume) -= (Long)mc1->szB;
      VG_(free_queue_length)--;
      if (show)
         VG_(printf)("mc_freelist: discard: volume now %lld\n",
                     VG_(free_queue_volume));
      tl_assert(VG_(free_queue_volume) >= 0);

      if (freed_list_start == freed_list_end) {
         freed_list_start = freed_list_end = NULL;
      } else {
         freed_list_start = mc1->next;
      }
      mc1->next = NULL; /* just paranoia */

      /* free MC_Chunk */
      if (MC_AllocCustom != mc1->allockind)
         VG_(cli_free) ( (void*)(mc1->data) );
      VG_(free) ( mc1 );
   }
}
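
/* Illustrative sketch (not from the original source): the queue above
   is what delays reuse of freed memory, so a client use-after-free
   stays detectable until the block has been flushed past
   --freelist-vol:

      #include <stdlib.h>
      int main(void)
      {
         int* x = malloc(sizeof(int));
         free(x);
         return *x;   // Memcheck: "Invalid read of size 4"
      }
*/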

MC_Chunk* MC_(get_freed_list_head)(void)
{
   return freed_list_start;
}

/* Allocate its shadow chunk, put it on the appropriate list. */
static
MC_Chunk* create_MC_Chunk ( ExeContext* ec, Addr p, SizeT szB,
                            MC_AllocKind kind )
{
   MC_Chunk* mc  = VG_(malloc)("mc.cMC.1 (a MC_Chunk)", sizeof(MC_Chunk));
   mc->data      = p;
   mc->szB       = szB;
   mc->allockind = kind;
   mc->where     = ec;

   /* Paranoia ... ensure the MC_Chunk is off-limits to the client, so
      the mc->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by VG_(malloc)
      should be noaccess as far as the client is concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mc, sizeof(MC_Chunk), NULL )) {
      VG_(tool_panic)("create_MC_Chunk: shadow area is accessible");
   }
   return mc;
}

/*------------------------------------------------------------*/
/*--- client_malloc(), etc                                 ---*/
/*------------------------------------------------------------*/

// XXX: should make this a proper error (bug #79311).
static Bool complain_about_silly_args(SizeT sizeB, Char* fn)
{
   // Cast to a signed type to catch any unexpectedly negative args.  We're
   // assuming here that the size asked for is not greater than 2^31 bytes
   // (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms).
   if ((SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg, "Warning: silly arg (%ld) to %s()\n",
                      (SSizeT)sizeB, fn );
      return True;
   }
   return False;
}

static Bool complain_about_silly_args2(SizeT n, SizeT sizeB)
{
   if ((SSizeT)n < 0 || (SSizeT)sizeB < 0) {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "Warning: silly args (%ld,%ld) to calloc()\n",
                      (SSizeT)n, (SSizeT)sizeB);
      return True;
   }
   return False;
}

/* Allocate memory and note change in memory available */
void* MC_(new_block) ( ThreadId tid,
                       Addr p, SizeT szB, SizeT alignB,
                       Bool is_zeroed, MC_AllocKind kind, VgHashTable table)
{
   ExeContext* ec;

   cmalloc_n_mallocs ++;

   // Allocate and zero if necessary
   if (p) {
      tl_assert(MC_AllocCustom == kind);
   } else {
      tl_assert(MC_AllocCustom != kind);
      p = (Addr)VG_(cli_malloc)( alignB, szB );
      if (!p) {
         return NULL;
      }
      if (is_zeroed) {
         VG_(memset)((void*)p, 0, szB);
      } else
      if (MC_(clo_malloc_fill) != -1) {
         tl_assert(MC_(clo_malloc_fill) >= 0x00 && MC_(clo_malloc_fill) <= 0xFF);
         VG_(memset)((void*)p, MC_(clo_malloc_fill), szB);
      }
   }

   // Only update this stat if allocation succeeded.
   cmalloc_bs_mallocd += (ULong)szB;

   ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   tl_assert(ec);

   VG_(HT_add_node)( table, create_MC_Chunk(ec, p, szB, kind) );

   if (is_zeroed)
      MC_(make_mem_defined)( p, szB );
   else {
      UInt ecu = VG_(get_ECU_from_ExeContext)(ec);
      tl_assert(VG_(is_plausible_ECU)(ecu));
      MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
   }

   return (void*)p;
}
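
/* Illustrative sketch (not from the original source): custom
   allocators reach MC_(new_block) with a nonzero 'p' (the
   MC_AllocCustom path above) via the VALGRIND_MALLOCLIKE_BLOCK client
   request from valgrind.h; 'p' and 'szB' here stand for hypothetical
   values produced by the client's own allocator:

      #include <valgrind/valgrind.h>
      // p = carve szB bytes out of our own superblock ...
      VALGRIND_MALLOCLIKE_BLOCK(p, szB, /*rzB*/0, /*is_zeroed*/0);
      // ... later, when the client frees it:
      VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);
*/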

void* MC_(malloc) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "malloc")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNew, MC_(malloc_list));
   }
}

void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
{
   if (complain_about_silly_args(n, "__builtin_vec_new")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, VG_(clo_alignment),
         /*is_zeroed*/False, MC_AllocNewVec, MC_(malloc_list));
   }
}

void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
{
   if (complain_about_silly_args(n, "memalign")) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, n, alignB,
         /*is_zeroed*/False, MC_AllocMalloc, MC_(malloc_list));
   }
}

void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
{
   if (complain_about_silly_args2(nmemb, size1)) {
      return NULL;
   } else {
      return MC_(new_block) ( tid, 0, nmemb*size1, VG_(clo_alignment),
         /*is_zeroed*/True, MC_AllocMalloc, MC_(malloc_list));
   }
}

static
void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
{
   if (MC_(clo_free_fill) != -1) {
      tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
      VG_(memset)((void*)mc->data, MC_(clo_free_fill), mc->szB);
   }

   /* Note: make redzones noaccess again -- just in case user made them
      accessible with a client request... */
   MC_(make_mem_noaccess)( mc->data-rzB, mc->szB + 2*rzB );

   /* Record where freed */
   mc->where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
   /* Put it out of harm's way for a while */
   add_to_freed_queue ( mc );
}

void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
{
   MC_Chunk* mc;

   cmalloc_n_frees++;

   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, p );
   } else {
      /* check if it is a matching free() / delete / delete [] */
      if (kind != mc->allockind) {
         tl_assert(p == mc->data);
         MC_(record_freemismatch_error) ( tid, mc );
      }
      die_and_free_mem ( tid, mc, rzB );
   }
}

void MC_(free) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocMalloc );
}

void MC_(__builtin_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNew );
}

void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
{
   MC_(handle_free)(
      tid, (Addr)p, MC_MALLOC_REDZONE_SZB, MC_AllocNewVec );
}
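
/* Illustrative sketch (not from the original source): the allockind
   check in MC_(handle_free) is what flags mismatched pairs such as:

      int* p = new int[10];   // recorded as MC_AllocNewVec
      delete p;                // arrives as MC_AllocNew
      // Memcheck: "Mismatched free() / delete / delete []"
*/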

void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* mc;
   void*     p_new;
   SizeT     old_szB;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   if (complain_about_silly_args(new_szB, "realloc"))
      return NULL;

   /* Remove the old block */
   mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it is a matching free() / delete / delete [] */
   if (MC_AllocMalloc != mc->allockind) {
      /* can not realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == mc->data);
      MC_(record_freemismatch_error) ( tid, mc );
      /* but keep going anyway */
   }

   old_szB = mc->szB;

   /* In all cases, even when the new size is smaller or unchanged, we
      reallocate and copy the contents, and make the old block
      inaccessible.  This is so as to guarantee to catch all cases of
      accesses via the old address after reallocation, regardless of
      the change in size.  (Of course the ability to detect accesses
      to the old block also depends on the size of the freed blocks
      queue.) */

   if (new_szB <= old_szB) {
      /* new size is smaller or the same */
      Addr a_new;
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);

         /* Retained part is copied, red zones set as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, new_szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;

   } else {
      /* new size is bigger */
      Addr a_new;
      tl_assert(old_szB < new_szB);
      /* Get new memory */
      a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

      if (a_new) {
         UInt        ecu;
         ExeContext* ec;

         ec = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
         tl_assert(ec);
         ecu = VG_(get_ECU_from_ExeContext)(ec);
         tl_assert(VG_(is_plausible_ECU)(ecu));

         /* First half kept and copied, second half new, red zones as normal */
         MC_(make_mem_noaccess)( a_new-MC_MALLOC_REDZONE_SZB,
                                 MC_MALLOC_REDZONE_SZB );
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, mc->szB );
         MC_(make_mem_undefined_w_otag)( a_new+mc->szB, new_szB-mc->szB,
                                         ecu | MC_OKIND_HEAP );
         MC_(make_mem_noaccess) ( a_new+new_szB, MC_MALLOC_REDZONE_SZB );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill),
                                                new_szB-old_szB);
         }

         /* Copy from old to new */
         VG_(memcpy)((void*)a_new, p_old, mc->szB);

         /* Possibly fill freed area with specified junk. */
         if (MC_(clo_free_fill) != -1) {
            tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
            VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
         }

         /* Free old memory */
         /* Nb: we have to allocate a new MC_Chunk for the new memory rather
            than recycling the old one, so that any erroneous accesses to the
            old memory are reported. */
         die_and_free_mem ( tid, mc, MC_MALLOC_REDZONE_SZB );

         // Allocate a new chunk.
         mc = create_MC_Chunk( ec, a_new, new_szB, MC_AllocMalloc );
      }

      p_new = (void*)a_new;
   }

   // Now insert the new mc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added mc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)( MC_(malloc_list), mc );

   return p_new;
}
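
/* Illustrative sketch (not from the original source): because the
   code above always moves the block, accesses through the old pointer
   after realloc() are catchable:

      char* p = malloc(8);
      char* q = realloc(p, 16);
      p[0] = 'x';   // invalid write: under Memcheck the old block is
                    // always freed, even if libc could have resized
                    // it in place
      free(q);
*/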

SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will be marked as addressable.
   return ( mc ? mc->szB : 0 );
}

/* This handles the in place resize of a block, as performed by the
   VALGRIND_RESIZEINPLACE_BLOCK client request.  It is unrelated to,
   and not used for, handling of the normal libc realloc()
   function. */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
{
   MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
   if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
      /* Reject if: p is not found, or oldSizeB is wrong,
         or new block would be empty. */
      MC_(record_free_error) ( tid, p );
      return;
   }

   if (oldSizeB == newSizeB)
      return;

   mc->szB = newSizeB;
   if (newSizeB < oldSizeB) {
      MC_(make_mem_noaccess)( p + newSizeB, oldSizeB - newSizeB + rzB );
   } else {
      ExeContext* ec  = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
      UInt        ecu = VG_(get_ECU_from_ExeContext)(ec);
      MC_(make_mem_undefined_w_otag)( p + oldSizeB, newSizeB - oldSizeB,
                                      ecu | MC_OKIND_HEAP );
      if (rzB > 0)
         MC_(make_mem_noaccess)( p + newSizeB, rzB );
   }
}
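
/* Illustrative sketch (not from the original source): clients drive
   the function above with the VALGRIND_RESIZEINPLACE_BLOCK request
   from memcheck.h; its arguments mirror this function's parameters:

      #include <valgrind/memcheck.h>
      VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB);
*/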

/*------------------------------------------------------------*/
/*--- Memory pool stuff.                                   ---*/
/*------------------------------------------------------------*/

/* Set to 1 for intensive sanity checking.  Is very expensive though
   and should not be used in production scenarios.  See #255966. */
#define MP_DETAILED_SANITY_CHECKS 0

static void check_mempool_sane(MC_Mempool* mp); /*forward*/

void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "create_mempool(0x%lx, %d, %d)\n",
                               pool, rzB, is_zeroed);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp != NULL) {
      VG_(tool_panic)("MC_(create_mempool): duplicate pool creation");
   }

   mp = VG_(malloc)("mc.cm.1", sizeof(MC_Mempool));
   mp->pool      = pool;
   mp->rzB       = rzB;
   mp->is_zeroed = is_zeroed;
   mp->chunks    = VG_(HT_construct)( "MC_(create_mempool)" );
   check_mempool_sane(mp);

   /* Paranoia ... ensure this area is off-limits to the client, so
      the mp->data field isn't visible to the leak checker.  If memory
      management is working correctly, any pointer returned by
      VG_(malloc) should be noaccess as far as the client is
      concerned. */
   if (!MC_(check_mem_is_noaccess)( (Addr)mp, sizeof(MC_Mempool), NULL )) {
      VG_(tool_panic)("MC_(create_mempool): shadow area is accessible");
   }

   VG_(HT_add_node)( MC_(mempool_list), mp );
}
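
/* Illustrative sketch (not from the original source): the pool
   lifecycle managed by this file, as seen from the client side via
   the requests in valgrind.h ('pool' doubles as the anchor address):

      static char pool[4096];
      VALGRIND_CREATE_MEMPOOL(pool, /*rzB*/0, /*is_zeroed*/0);
      VALGRIND_MEMPOOL_ALLOC(pool, pool + 16, 100);  // hand out a chunk
      VALGRIND_MEMPOOL_FREE(pool, pool + 16);        // give it back
      VALGRIND_DESTROY_MEMPOOL(pool);
*/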

void MC_(destroy_mempool)(Addr pool)
{
   MC_Chunk*   mc;
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "destroy_mempool(0x%lx)\n", pool);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)pool );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, pool );
      return;
   }
   check_mempool_sane(mp);

   // Clean up the chunks, one by one
   VG_(HT_ResetIter)(mp->chunks);
   while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
      /* Note: make redzones noaccess again -- just in case user made them
         accessible with a client request... */
      MC_(make_mem_noaccess)(mc->data-mp->rzB, mc->szB + 2*mp->rzB );
   }
   // Destroy the chunk table
   VG_(HT_destruct)(mp->chunks);

   VG_(free)(mp);
}

static Int
mp_compar(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

static void
check_mempool_sane(MC_Mempool* mp)
{
   UInt n_chunks, i, bad = 0;
   static UInt tick = 0;

   MC_Chunk **chunks = (MC_Chunk**) VG_(HT_to_array)( mp->chunks, &n_chunks );
   if (!chunks)
      return;

   if (VG_(clo_verbosity) > 1) {
      if (tick++ >= 10000/(VG_(clo_verbosity))) {
         UInt total_pools = 0, total_chunks = 0;
         MC_Mempool* mp2;

         VG_(HT_ResetIter)(MC_(mempool_list));
         while ( (mp2 = VG_(HT_Next)(MC_(mempool_list))) ) {
            total_pools++;
            VG_(HT_ResetIter)(mp2->chunks);
            while (VG_(HT_Next)(mp2->chunks))
               total_chunks++;
         }
         VG_(message)(Vg_UserMsg,
                      "Total mempools active: %d pools, %d chunks\n",
                      total_pools, total_chunks);
         tick = 0;
      }
   }

   VG_(ssort)((void*)chunks, n_chunks, sizeof(VgHashNode*), mp_compar);

   /* Sanity check; assert that the blocks are now in order */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data > chunks[i+1]->data) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d is out of order "
                      "wrt. its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   /* Sanity check -- make sure they don't overlap */
   for (i = 0; i < n_chunks-1; i++) {
      if (chunks[i]->data + chunks[i]->szB > chunks[i+1]->data ) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d overlaps with its successor\n",
                      i+1, n_chunks);
         bad = 1;
      }
   }

   if (bad) {
      VG_(message)(Vg_UserMsg,
                   "Bad mempool (%d chunks), dumping chunks for inspection:\n",
                   n_chunks);
      for (i = 0; i < n_chunks; ++i) {
         VG_(message)(Vg_UserMsg,
                      "Mempool chunk %d / %d: %ld bytes "
                      "[%lx,%lx), allocated:\n",
                      i+1, n_chunks,
                      chunks[i]->szB + 0UL,
                      chunks[i]->data,
                      chunks[i]->data + chunks[i]->szB);
         VG_(pp_ExeContext)(chunks[i]->where);
      }
   }
   VG_(free)(chunks);
}

void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_alloc(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup) ( MC_(mempool_list), (UWord)pool );
   if (mp == NULL) {
      MC_(record_illegal_mempool_error) ( tid, pool );
   } else {
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
      MC_(new_block)(tid, addr, szB, /*ignored*/0, mp->is_zeroed,
                     MC_AllocCustom, mp->chunks);
      if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   }
}

void MC_(mempool_free)(Addr pool, Addr addr)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_free(0x%lx, 0x%lx)\n", pool, addr);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
   mc = VG_(HT_remove)(mp->chunks, (UWord)addr);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addr);
      return;
   }

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg,
                   "mempool_free(0x%lx, 0x%lx) freed chunk of %ld bytes\n",
                   pool, addr, mc->szB + 0UL);
   }

   die_and_free_mem ( tid, mc, mp->rzB );
   if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
}

void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
      tl_assert(chunks == NULL);
      return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {
      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];
      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {
         /* The current chunk is entirely within the trim extent: keep
            it. */
         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {
         /* The current chunk is entirely outside the trim extent:
            delete it. */
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );

      } else {
         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */
         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
            min = mc->data;
            lo  = addr;
         } else {
            min = addr;
            lo  = mc->data;
         }

         /* The chunk's extent ends at mc->data + mc->szB (the chunk's
            own size, not the trim extent's size). */
         if (mc->data + mc->szB > addr + szB) {
            max = mc->data + mc->szB;
            hi  = addr + szB;
         } else {
            max = addr + szB;
            hi  = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
            MC_(make_mem_noaccess)( min, lo - min );
         }
         if (hi < max && !EXTENT_CONTAINS(max)) {
            MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB  = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );
      }

#undef EXTENT_CONTAINS
   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
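
/* Illustrative sketch (not from the original source): a trim keeps
   only the chunks inside [addr, addr+szB), discarding or shrinking
   everything else -- useful when a pool allocator releases the tail
   of its arena ('keep_base' and 'keep_len' are hypothetical):

      VALGRIND_MEMPOOL_TRIM(pool, keep_base, keep_len);
*/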

void MC_(move_mempool)(Addr poolA, Addr poolB)
{
   MC_Mempool* mp;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "move_mempool(0x%lx, 0x%lx)\n", poolA, poolB);
      VG_(get_and_pp_StackTrace)
         (VG_(get_running_tid)(), MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_remove) ( MC_(mempool_list), (UWord)poolA );

   if (mp == NULL) {
      ThreadId tid = VG_(get_running_tid)();
      MC_(record_illegal_mempool_error) ( tid, poolA );
      return;
   }

   mp->pool = poolB;
   VG_(HT_add_node)( MC_(mempool_list), mp );
}

void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
{
   MC_Mempool* mp;
   MC_Chunk*   mc;
   ThreadId    tid = VG_(get_running_tid)();

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_change(0x%lx, 0x%lx, 0x%lx, %ld)\n",
                   pool, addrA, addrB, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);

   mc = VG_(HT_remove)(mp->chunks, (UWord)addrA);
   if (mc == NULL) {
      MC_(record_free_error)(tid, (Addr)addrA);
      return;
   }

   mc->data = addrB;
   mc->szB  = szB;
   VG_(HT_add_node)( mp->chunks, mc );

   check_mempool_sane(mp);
}
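
/* Illustrative sketch (not from the original source): a client that
   relocates a live chunk inside its pool (e.g. a compacting
   allocator) reports the move so the shadow chunk follows it:

      VALGRIND_MEMPOOL_CHANGE(pool, old_addr, new_addr, new_szB);
*/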

Bool MC_(mempool_exists)(Addr pool)
{
   MC_Mempool* mp;

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      return False;
   }
   return True;
}

/*------------------------------------------------------------*/
/*--- Statistics printing                                  ---*/
/*------------------------------------------------------------*/

void MC_(print_malloc_stats) ( void )
{
   MC_Chunk* mc;
   SizeT     nblocks = 0;
   ULong     nbytes  = 0;

   if (VG_(clo_verbosity) == 0)
      return;
   if (VG_(clo_xml))
      return;

   /* Count memory still in use. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      nblocks++;
      nbytes += (ULong)mc->szB;
   }

   VG_(umsg)(
      "HEAP SUMMARY:\n"
      "    in use at exit: %'llu bytes in %'lu blocks\n"
      "  total heap usage: %'lu allocs, %'lu frees, %'llu bytes allocated\n"
      "\n",
      nbytes, nblocks,
      cmalloc_n_mallocs,
      cmalloc_n_frees, cmalloc_bs_mallocd
   );
}
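
/* Example output (not from the original source) of the summary
   printed above, as it appears in a typical Memcheck run:

      ==1234== HEAP SUMMARY:
      ==1234==     in use at exit: 0 bytes in 0 blocks
      ==1234==   total heap usage: 3 allocs, 3 frees, 1,024 bytes allocated
*/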

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/