/*--------------------------------------------------------------------*/
/*--- The leak checker.                             mc_leakcheck.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2010 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacehl.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_execontext.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcsignal.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_signals.h"
#include "pub_tool_tooliface.h"     // Needed for mc_include.h

#include "mc_include.h"

#include <setjmp.h>                 // For jmp_buf

/*------------------------------------------------------------*/
/*--- An overview of leak checking.                        ---*/
/*------------------------------------------------------------*/

// Leak-checking is a directed-graph traversal problem.  The graph has
// two kinds of nodes:
// - root-set nodes:
//   - GP registers of all threads;
//   - valid, aligned, pointer-sized data words in valid client memory,
//     including stacks, but excluding words within client heap-allocated
//     blocks (they are excluded so that later on we can differentiate
//     between heap blocks that are indirectly leaked vs. directly leaked).
// - heap-allocated blocks.  A block is a mempool chunk or a malloc chunk
//   that doesn't contain a mempool chunk.  Nb: the terms "blocks" and
//   "chunks" are used interchangeably below.
//
// There are two kinds of edges:
// - start-pointers, i.e. pointers to the start of a block;
// - interior-pointers, i.e. pointers to the interior of a block.
//
// We use "pointers" rather than "edges" below.
//
// Root set nodes only point to blocks.  Blocks only point to blocks;
// a block can point to itself.
//
// The aim is to traverse the graph and determine the status of each block.
//
// There are 9 distinct cases.  See memcheck/docs/mc-manual.xml for details.
// Presenting all nine categories to the user is probably too much.
// Currently we do this:
// - definitely lost:  case 3
// - indirectly lost:  cases 4, 9
// - possibly lost:    cases 5..8
// - still reachable:  cases 1, 2
//
// It's far from clear that this is the best possible categorisation;  it
// has accreted over time without any central guiding principle.

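// For illustration, a hypothetical client fragment (in the style of
// memcheck/tests/leak-cases.c; all names here are made up) that produces
// one block in each of the four user-facing categories at exit:
//
//    #include <stdlib.h>
//    static char* r;                      // still reachable
//    static char* p;                      // possibly lost
//    int main(void)
//    {
//       r = malloc(16);                   // start-pointer kept in root set
//       p = malloc(16) + 8;               // only an interior-pointer kept
//       char** dl = malloc(sizeof *dl);   // definitely lost (see below)
//       *dl = malloc(16);                 // indirectly lost once its
//                                         //   holder block is lost
//       dl = NULL;                        // drop last pointer to the holder
//       return 0;
//    }
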
/*------------------------------------------------------------*/
/*--- XXX: Thoughts for improvement.                       ---*/
/*------------------------------------------------------------*/

// From the user's point of view:
// - If they aren't using interior-pointers, they just have to fix the
//   directly lost blocks, and the indirectly lost ones will be fixed as
//   part of that.  Any possibly lost blocks will just be due to random
//   pointer garbage and can be ignored.
//
// - If they are using interior-pointers, the fact that they currently are not
//   being told which ones might be directly lost vs. indirectly lost makes
//   it hard to know where to begin.
//
// All this makes me wonder if a new option is warranted:
// --follow-interior-pointers.  By default it would be off, the leak checker
// wouldn't follow interior-pointers and there would only be 3 categories:
// R, DL, IL.
//
// If turned on, then it would show 7 categories (R, DL, IL, DR/DL, IR/IL,
// IR/IL/DL, IL/DL).  That output is harder to understand but it's your own
// damn fault for using interior-pointers...
//
// Also, why are two blank lines printed between each loss record?
//
// Also, --show-reachable is a bad name because it also turns on the showing
// of indirectly leaked blocks(!)  It would be better named --show-all or
// --show-all-heap-blocks, because that's the end result.
//
// Also, the VALGRIND_LEAK_CHECK and VALGRIND_QUICK_LEAK_CHECK aren't great
// names.  VALGRIND_FULL_LEAK_CHECK and VALGRIND_SUMMARY_LEAK_CHECK would be
// better.
//
// Also, VALGRIND_COUNT_LEAKS and VALGRIND_COUNT_LEAK_BLOCKS aren't great as
// they combine direct leaks and indirect leaks into one.  New, more precise
// ones (they'll need new names) would be good.  If more categories are
// used, as per the --follow-interior-pointers option, they should be
// updated accordingly.  And they should use a struct to return the values
// (a sketch follows).

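// A sketch of what such a struct might look like (hypothetical -- no such
// type exists in the current client-request interface):
//
//    typedef struct {
//       SizeT definitely_lost_bytes,  definitely_lost_blocks;
//       SizeT indirectly_lost_bytes,  indirectly_lost_blocks;
//       SizeT possibly_lost_bytes,    possibly_lost_blocks;
//       SizeT still_reachable_bytes,  still_reachable_blocks;
//       SizeT suppressed_bytes,       suppressed_blocks;
//    } VgLeakCounts;
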
// Also, for this case:
//
//  (4)  p4      BBB ---> AAA
//
// BBB is definitely directly lost.  AAA is definitely indirectly lost.
// Here are the relevant loss records printed for a full check (each block
// is 16 bytes):
//
// ==20397== 16 bytes in 1 blocks are indirectly lost in loss record 9 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400578: main (leak-cases.c:72)
//
// ==20397== 32 (16 direct, 16 indirect) bytes in 1 blocks are definitely
// lost in loss record 14 of 15
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// The first one is fine -- it describes AAA.
//
// The second one is for BBB.  It's correct in that 16 bytes in 1 block are
// directly lost.  It's also correct that 16 are indirectly lost as a result,
// but it means that AAA is being counted twice in the loss records.  (It's
// not, thankfully, counted twice in the summary counts).  Argh.
//
// This would be less confusing for the second one:
//
// ==20397== 16 bytes in 1 blocks are definitely lost in loss record 14
// of 15 (and 16 bytes in 1 block are indirectly lost as a result;  they
// are mentioned elsewhere (if --show-reachable=yes is given!))
// ==20397==    at 0x4C2694E: malloc (vg_replace_malloc.c:177)
// ==20397==    by 0x400521: mk (leak-cases.c:49)
// ==20397==    by 0x400580: main (leak-cases.c:72)
//
// But ideally we'd present the loss record for the directly lost block and
// then the resultant indirectly lost blocks, and make clear the dependence.
// Double argh.

/*------------------------------------------------------------*/
/*--- The actual algorithm.                                ---*/
/*------------------------------------------------------------*/

// - Find all the blocks (a.k.a. chunks) to check.  Mempool chunks require
//   some special treatment because they can be within malloc'd blocks.
// - Scan every word in the root set (GP registers and valid
//   non-heap memory words).
//   - First, we skip if it doesn't point to valid memory.
//   - Then, we see if it points to the start or interior of a block.  If
//     so, we push the block onto the mark stack and mark it as having been
//     reached.
// - Then, we process the mark stack, repeating the scanning for each block;
//   this can push more blocks onto the mark stack.  We repeat until the
//   mark stack is empty.  Each block is marked as definitely or possibly
//   reachable, depending on whether interior-pointers were required to
//   reach it.
// - At this point we know for every block if it's reachable or not.
// - We then push each unreached block onto the mark stack, using the block
//   number as the "clique" number.
// - We process the mark stack again, this time grouping blocks into cliques
//   in order to facilitate the directly/indirectly lost categorisation.
// - We group blocks by their ExeContexts and categorisation, and print them
//   if --leak-check=full.  We also print summary numbers.

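// In code terms, the phases above map onto the condensed sketch below of
// MC_(detect_memory_leaks) (the real function, at the bottom of this file,
// adds sanity checks and verbosity handling):
//
//    lc_chunks = find_active_chunks(&lc_n_chunks);          // find blocks
//    for (each root-set segment)                            // scan roots
//       lc_scan_memory(seg->start, seg_size,
//                      /*is_prior_definite*/True, /*clique*/-1);
//    VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);
//    lc_process_markstack(/*clique*/-1);                    // trace reachable
//    for (i = 0; i < lc_n_chunks; i++)                      // form cliques
//       if (lc_extras[i].state == Unreached) {
//          lc_push(i, lc_chunks[i]);
//          lc_process_markstack(i);
//       }
//    print_results(tid, is_full_check);                     // report
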
// A note on "cliques":
// - A directly lost block is one with no pointers to it.  An indirectly
//   lost block is one that is pointed to by a directly or indirectly lost
//   block.
// - Each directly lost block has zero or more indirectly lost blocks
//   hanging off it.  All these blocks together form a "clique".  The
//   directly lost block is called the "clique leader".  The clique number
//   is the number (in lc_chunks[]) of the clique leader.
// - Actually, a directly lost block may be pointed to if it's part of a
//   cycle.  In that case, there may be more than one choice for the clique
//   leader, and the choice is arbitrary.  Eg. if you have A-->B and B-->A
//   either A or B could be the clique leader.
// - Cliques cannot overlap, and will be truncated to avoid this.  Eg. if we
//   have A-->C and B-->C, the two cliques will be {A,C} and {B}, or {A} and
//   {B,C} (again the choice is arbitrary).  This is because we don't want
//   to count a block as indirectly lost more than once.

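// For example (sizes made up for illustration): if A (16 bytes) is directly
// lost and A-->B-->C with B and C 16 bytes each, the clique is {A,B,C},
// lc_extras[A].indirect_szB ends up as 32, and A's loss record prints as
// "48 (16 direct, 32 indirect) bytes in 1 blocks are definitely lost".
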
// A note on 'is_prior_definite':
// - This is a boolean used in various places that indicates if the chain
//   up to the prior node (prior to the one being considered) is definite.
// - In the clique == -1 case:
//   - if True it means that the prior node is a root-set node, or that the
//     prior node is a block which is reachable from the root-set via
//     start-pointers only;
//   - if False it means that the prior node is a block that is only
//     reachable from the root-set via a path including at least one
//     interior-pointer.
// - In the clique != -1 case, currently it's always True because we treat
//   start-pointers and interior-pointers the same for direct/indirect leak
//   checking.  If we added a PossibleIndirectLeak state then this would
//   need to change.

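// For example, take the hypothetical chain (S = start-pointer,
// I = interior-pointer):
//
//    root --S--> A --I--> B --S--> C
//
// A ends up Reachable.  B is only reachable via an interior-pointer, so it
// ends up Possible.  When B's words are then scanned, is_prior_definite is
// False (B's state is Possible), so C also ends up Possible even though
// the final hop is a start-pointer.
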
// Define to debug the memory-leak-detector.
#define VG_DEBUG_LEAKCHECK 0
#define VG_DEBUG_CLIQUE    0

/*------------------------------------------------------------*/
/*--- Getting the initial chunks, and searching them.      ---*/
/*------------------------------------------------------------*/

// Compare the MC_Chunks by 'data' (i.e. the address of the block).
static Int compare_MC_Chunks(void* n1, void* n2)
{
   MC_Chunk* mc1 = *(MC_Chunk**)n1;
   MC_Chunk* mc2 = *(MC_Chunk**)n2;
   if (mc1->data < mc2->data) return -1;
   if (mc1->data > mc2->data) return  1;
   return 0;
}

#if VG_DEBUG_LEAKCHECK
// Used to sanity-check the fast binary-search mechanism.
static
Int find_chunk_for_OLD ( Addr       ptr,
                         MC_Chunk** chunks,
                         Int        n_chunks )
{
   Int  i;
   Addr a_lo, a_hi;
   PROF_EVENT(70, "find_chunk_for_OLD");
   for (i = 0; i < n_chunks; i++) {
      PROF_EVENT(71, "find_chunk_for_OLD(loop)");
      a_lo = chunks[i]->data;
      a_hi = ((Addr)chunks[i]->data) + chunks[i]->szB;
      if (a_lo <= ptr && ptr < a_hi)
         return i;
   }
   return -1;
}
#endif

// Find the i such that ptr points at or inside the block described by
// chunks[i].  Return -1 if none found.  This assumes that chunks[]
// has been sorted on the 'data' field.
static
Int find_chunk_for ( Addr       ptr,
                     MC_Chunk** chunks,
                     Int        n_chunks )
{
   Addr a_mid_lo, a_mid_hi;
   Int lo, mid, hi, retVal;
   // VG_(printf)("find chunk for %p = ", ptr);
   retVal = -1;
   lo = 0;
   hi = n_chunks-1;
   while (True) {
      // Invariant: current unsearched space is from lo to hi, inclusive.
      if (lo > hi) break; // not found

      mid      = (lo + hi) / 2;
      a_mid_lo = chunks[mid]->data;
      a_mid_hi = chunks[mid]->data + chunks[mid]->szB;
      // Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
      // Special-case zero-sized blocks - treat them as if they had
      // size 1.  Not doing so causes them to not cover any address
      // range at all and so will never be identified as the target of
      // any pointer, which causes them to be incorrectly reported as
      // definitely leaked.
      if (chunks[mid]->szB == 0)
         a_mid_hi++;

      if (ptr < a_mid_lo) {
         hi = mid-1;
         continue;
      }
      if (ptr >= a_mid_hi) {
         lo = mid+1;
         continue;
      }
      tl_assert(ptr >= a_mid_lo && ptr < a_mid_hi);
      retVal = mid;
      break;
   }

#  if VG_DEBUG_LEAKCHECK
   tl_assert(retVal == find_chunk_for_OLD ( ptr, chunks, n_chunks ));
#  endif
   // VG_(printf)("%d\n", retVal);
   return retVal;
}

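// For example, given two sorted chunks covering [0x100..0x110) and
// [0x200..0x220), find_chunk_for(0x208, chunks, 2) returns 1 (an
// interior-pointer into the second block), find_chunk_for(0x100, ...)
// returns 0 (a start-pointer), and find_chunk_for(0x150, ...) returns -1.
// (Addresses here are made up for illustration.)
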
static MC_Chunk**
find_active_chunks(UInt* pn_chunks)
{
   // Our goal is to construct a set of chunks that includes every
   // mempool chunk, and every malloc region that *doesn't* contain a
   // mempool chunk.
   MC_Mempool *mp;
   MC_Chunk **mallocs, **chunks, *mc;
   UInt n_mallocs, n_chunks, m, s;
   Bool *malloc_chunk_holds_a_pool_chunk;

   // First we collect all the malloc chunks into an array and sort it.
   // We do this because we want to query the chunks by interior
   // pointers, requiring binary search.
   mallocs = (MC_Chunk**) VG_(HT_to_array)( MC_(malloc_list), &n_mallocs );
   if (n_mallocs == 0) {
      tl_assert(mallocs == NULL);
      *pn_chunks = 0;
      return NULL;
   }
   VG_(ssort)(mallocs, n_mallocs, sizeof(VgHashNode*), compare_MC_Chunks);

   // Then we build an array containing a Bool for each malloc chunk,
   // indicating whether it contains any mempools.
   malloc_chunk_holds_a_pool_chunk = VG_(calloc)( "mc.fas.1",
                                                  n_mallocs, sizeof(Bool) );
   n_chunks = n_mallocs;

   // Then we loop over the mempool tables.  For each chunk in each
   // pool, we set the entry in the Bool array corresponding to the
   // malloc chunk containing the mempool chunk.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {

         // We'll need to record this chunk.
         n_chunks++;

         // Possibly invalidate the malloc holding the beginning of this chunk.
         m = find_chunk_for(mc->data, mallocs, n_mallocs);
         if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
            tl_assert(n_chunks > 0);
            n_chunks--;
            malloc_chunk_holds_a_pool_chunk[m] = True;
         }

         // Possibly invalidate the malloc holding the end of this chunk.
         if (mc->szB > 1) {
            m = find_chunk_for(mc->data + (mc->szB - 1), mallocs, n_mallocs);
            if (m != -1 && malloc_chunk_holds_a_pool_chunk[m] == False) {
               tl_assert(n_chunks > 0);
               n_chunks--;
               malloc_chunk_holds_a_pool_chunk[m] = True;
            }
         }
      }
   }
   tl_assert(n_chunks > 0);

   // Create final chunk array.
   chunks = VG_(malloc)("mc.fas.2", sizeof(VgHashNode*) * (n_chunks));
   s = 0;

   // Copy the mempool chunks and the non-marked malloc chunks into a
   // combined array of chunks.
   VG_(HT_ResetIter)(MC_(mempool_list));
   while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
      VG_(HT_ResetIter)(mp->chunks);
      while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
         tl_assert(s < n_chunks);
         chunks[s++] = mc;
      }
   }
   for (m = 0; m < n_mallocs; ++m) {
      if (!malloc_chunk_holds_a_pool_chunk[m]) {
         tl_assert(s < n_chunks);
         chunks[s++] = mallocs[m];
      }
   }
   tl_assert(s == n_chunks);

   // Free temporaries.
   VG_(free)(mallocs);
   VG_(free)(malloc_chunk_holds_a_pool_chunk);

   *pn_chunks = n_chunks;

   return chunks;
}

/*------------------------------------------------------------*/
/*--- The leak detector proper.                            ---*/
/*------------------------------------------------------------*/

// Holds extra info about each block during leak checking.
typedef
   struct {
      UInt  state:2;    // Reachedness.
      SizeT indirect_szB : (sizeof(SizeT)*8)-2; // If Unreached, how many bytes
                                                //   are unreachable from here.
   }
   LC_Extra;

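// A note on the packing (assuming a 64-bit host, so sizeof(SizeT) == 8):
// 'state' takes 2 bits and 'indirect_szB' the remaining 62, so the whole
// LC_Extra typically fits in a single word and lc_extras[] stays compact
// even for programs with millions of live blocks.
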
// An array holding pointers to every chunk we're checking.  Sorted by address.
static MC_Chunk** lc_chunks;
// How many chunks we're dealing with.
static Int        lc_n_chunks;

// This has the same number of entries as lc_chunks, and each entry
// in lc_chunks corresponds with the entry here (ie. lc_chunks[i] and
// lc_extras[i] describe the same block).
static LC_Extra* lc_extras;

// Records chunks that are currently being processed.  Each element in the
// stack is an index into lc_chunks and lc_extras.  Its size is
// 'lc_n_chunks' because in the worst case that's how many chunks could be
// pushed onto it (actually I think the maximum is lc_n_chunks-1 but let's
// be safe).
static Int* lc_markstack;
// The index of the top element of the stack; -1 if the stack is empty, 0 if
// the stack has one element, 1 if it has two, etc.
static Int  lc_markstack_top;

// Keeps track of how many bytes of memory we've scanned, for printing.
// (Nb: We don't keep track of how many register bytes we've scanned.)
static SizeT lc_scanned_szB;

// Leak summary counts, filled in by print_results().
SizeT MC_(bytes_leaked)     = 0;
SizeT MC_(bytes_indirect)   = 0;
SizeT MC_(bytes_dubious)    = 0;
SizeT MC_(bytes_reachable)  = 0;
SizeT MC_(bytes_suppressed) = 0;

SizeT MC_(blocks_leaked)     = 0;
SizeT MC_(blocks_indirect)   = 0;
SizeT MC_(blocks_dubious)    = 0;
SizeT MC_(blocks_reachable)  = 0;
SizeT MC_(blocks_suppressed) = 0;

// Determines if a pointer is to a chunk.  Returns the chunk number et al
// via call-by-reference.
static Bool
lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
{
   Int       ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   // Quick filter: is 'ptr' within valid client memory at all?
   if (!VG_(am_is_valid_for_client)(ptr, 1, VKI_PROT_READ)) {
      return False;
   }
   ch_no = find_chunk_for(ptr, lc_chunks, lc_n_chunks);
   tl_assert(ch_no >= -1 && ch_no < lc_n_chunks);
   if (ch_no == -1)
      return False;

   // Ok, we've found a pointer to a chunk.  Get the MC_Chunk and its
   // LC_Extra.
   ch = lc_chunks[ch_no];
   ex = &(lc_extras[ch_no]);

   tl_assert(ptr >= ch->data);
   tl_assert(ptr < ch->data + ch->szB + (ch->szB==0 ? 1 : 0));

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("ptr=%#lx -> block %d\n", ptr, ch_no);

   *pch_no = ch_no;
   *pch    = ch;
   *pex    = ex;
   return True;
}

// Push a chunk (well, just its index) onto the mark stack.
static void lc_push(Int ch_no, MC_Chunk* ch)
{
   if (0) {
      VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
   }
   lc_markstack_top++;
   tl_assert(lc_markstack_top < lc_n_chunks);
   lc_markstack[lc_markstack_top] = ch_no;
}

// Pop the index of the chunk on the top of the mark stack into *ret.
// Returns False if the stack is empty, True otherwise.
static Bool lc_pop(Int* ret)
{
   if (-1 == lc_markstack_top) {
      return False;
   } else {
      tl_assert(0 <= lc_markstack_top && lc_markstack_top < lc_n_chunks);
      *ret = lc_markstack[lc_markstack_top];
      lc_markstack_top--;
      return True;
   }
}

// If 'ptr' is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.
static void
lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // Only push it if it hasn't been seen previously.
   if (ex->state == Unreached) {
      lc_push(ch_no, ch);
   }

   // Possibly upgrade the state, ie. one of:
   // - Unreached --> Possible
   // - Unreached --> Reachable
   // - Possible  --> Reachable
   if (ptr == ch->data && is_prior_definite) {
      // 'ptr' points to the start of the block, and the prior node is
      // definite, which means that this block is definitely reachable.
      ex->state = Reachable;

   } else if (ex->state == Unreached) {
      // Either 'ptr' is an interior-pointer, or the prior node isn't definite,
      // which means that we can only mark this block as possibly reachable.
      ex->state = Possible;
   }
}

static void
lc_push_if_a_chunk_ptr_register(Addr ptr)
{
   lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
}

// If ptr is pointing to a heap-allocated block which hasn't been seen
// before, push it onto the mark stack.  Clique is the index of the
// clique leader.
static void
lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique)
{
   Int ch_no;
   MC_Chunk* ch;
   LC_Extra* ex;

   tl_assert(0 <= clique && clique < lc_n_chunks);

   if ( ! lc_is_a_chunk_ptr(ptr, &ch_no, &ch, &ex) )
      return;

   // If it's not Unreached, it's already been handled so ignore it.
   // If ch_no==clique, it's the clique leader, which means this is a cyclic
   // structure;  again ignore it because it's already been handled.
   if (ex->state == Unreached && ch_no != clique) {
      // Note that, unlike reachable blocks, we currently don't distinguish
      // between start-pointers and interior-pointers here.  We probably
      // should, though.
      ex->state = IndirectLeak;
      lc_push(ch_no, ch);

      // Add the block to the clique, and add its size to the
      // clique-leader's indirect size.  Also, if the new block was
      // itself a clique leader, it isn't any more, so add its
      // indirect_szB to the new clique leader.
      if (VG_DEBUG_CLIQUE) {
         if (ex->indirect_szB > 0)
            VG_(printf)("  clique %d joining clique %d adding %lu+%lu\n",
                        ch_no, clique, (SizeT)ch->szB, (SizeT)ex->indirect_szB);
         else
            VG_(printf)("  block %d joining clique %d adding %lu\n",
                        ch_no, clique, (SizeT)ch->szB);
      }

      lc_extras[clique].indirect_szB += ch->szB;
      lc_extras[clique].indirect_szB += ex->indirect_szB;
      ex->indirect_szB = 0;    // Shouldn't matter.
   }
}

static void
lc_push_if_a_chunk_ptr(Addr ptr, Int clique, Bool is_prior_definite)
{
   if (clique == -1)
      lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
   else
      lc_push_with_clique_if_a_chunk_ptr(ptr, clique);
}

static jmp_buf memscan_jmpbuf;

static
void scan_all_valid_memory_catcher ( Int sigNo, Addr addr )
{
   if (0)
      VG_(printf)("OUCH! sig=%d addr=%#lx\n", sigNo, addr);
   if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS)
      __builtin_longjmp(memscan_jmpbuf, 1);
}

// Scan a block of memory between [start, start+len).  This range may
// be bogus, inaccessible, or otherwise strange; we deal with it.  For each
// valid aligned word we check whether it points to a chunk, and push the
// chunk onto the mark stack if so.
static void
lc_scan_memory(Addr start, SizeT len, Bool is_prior_definite, Int clique)
{
   Addr ptr = VG_ROUNDUP(start,     sizeof(Addr));
   Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
   vki_sigset_t sigmask;

   if (VG_DEBUG_LEAKCHECK)
      VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);

   VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &sigmask);
   VG_(set_fault_catcher)(scan_all_valid_memory_catcher);

   // We might be in the middle of a page.  Do a cheap check to see if
   // it's valid;  if not, skip onto the next page.
   if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ))
      ptr = VG_PGROUNDUP(ptr+1);        // First page is bad - skip it.

   while (ptr < end) {
      Addr addr;

      // Skip invalid chunks.
      if ( ! MC_(is_within_valid_secondary)(ptr) ) {
         ptr = VG_ROUNDUP(ptr+1, SM_SIZE);
         continue;
      }

      // Look to see if this page seems reasonable.
      if ((ptr % VKI_PAGE_SIZE) == 0) {
         if (!VG_(am_is_valid_for_client)(ptr, sizeof(Addr), VKI_PROT_READ)) {
            ptr += VKI_PAGE_SIZE;      // Bad page - skip it.
            continue;
         }
      }

      if (__builtin_setjmp(memscan_jmpbuf) == 0) {
         if ( MC_(is_valid_aligned_word)(ptr) ) {
            lc_scanned_szB += sizeof(Addr);
            addr = *(Addr *)ptr;
            // If we get here, the scanned word is in valid memory.  Now
            // let's see if its contents point to a chunk.
            lc_push_if_a_chunk_ptr(addr, clique, is_prior_definite);
         } else if (0 && VG_DEBUG_LEAKCHECK) {
            VG_(printf)("%#lx not valid\n", ptr);
         }
         ptr += sizeof(Addr);
      } else {
         // We need to restore the signal mask, because we were
         // longjmped out of a signal handler.
         VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);

         ptr = VG_PGROUNDUP(ptr+1);     // Bad page - skip it.
      }
   }

   VG_(sigprocmask)(VKI_SIG_SETMASK, &sigmask, NULL);
   VG_(set_fault_catcher)(NULL);
}

// Process the mark stack until empty.
static void lc_process_markstack(Int clique)
{
   Int  top = -1;    // shut gcc up
   Bool is_prior_definite;

   while (lc_pop(&top)) {
      tl_assert(top >= 0 && top < lc_n_chunks);

      // See comment about 'is_prior_definite' at the top to understand this.
      is_prior_definite = ( Possible != lc_extras[top].state );

      lc_scan_memory(lc_chunks[top]->data, lc_chunks[top]->szB,
                     is_prior_definite, clique);
   }
}

static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
{
   LossRecordKey* a = (LossRecordKey*)key;
   LossRecordKey* b = &(((LossRecord*)elem)->key);

   // Compare on states first because that's fast.
   if (a->state < b->state) return -1;
   if (a->state > b->state) return  1;
   // Ok, the states are equal.  Now compare the locations, which is slower.
   if (VG_(eq_ExeContext)(
            MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
      return 0;
   // Different locations.  Ordering is arbitrary, just use the ec pointer.
   if (a->allocated_at < b->allocated_at) return -1;
   if (a->allocated_at > b->allocated_at) return  1;
   VG_(tool_panic)("bad LossRecord comparison");
}

static Int cmp_LossRecords(void* va, void* vb)
{
   LossRecord* lr_a = *(LossRecord**)va;
   LossRecord* lr_b = *(LossRecord**)vb;
   SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
   SizeT total_szB_b = lr_b->szB + lr_b->indirect_szB;

   // First compare by sizes.
   if (total_szB_a < total_szB_b) return -1;
   if (total_szB_a > total_szB_b) return  1;
   // If sizes are equal, compare by states.
   if (lr_a->key.state < lr_b->key.state) return -1;
   if (lr_a->key.state > lr_b->key.state) return  1;
   // If they're still equal here, it doesn't matter that much, but we keep
   // comparing other things so that regtests are as deterministic as
   // possible.  So:  compare num_blocks.
   if (lr_a->num_blocks < lr_b->num_blocks) return -1;
   if (lr_a->num_blocks > lr_b->num_blocks) return  1;
   // Finally, compare ExeContext addresses... older ones are likely to have
   // lower addresses.
   if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
   if (lr_a->key.allocated_at > lr_b->key.allocated_at) return  1;
   return 0;
}

static void print_results(ThreadId tid, Bool is_full_check)
{
   Int          i, n_lossrecords;
   OSet*        lr_table;
   LossRecord** lr_array;
   LossRecord*  lr;
   Bool         is_suppressed;

   // Create the lr_table, which holds the loss records.
   lr_table =
      VG_(OSetGen_Create)(offsetof(LossRecord, key),
                          cmp_LossRecordKey_LossRecord,
                          VG_(malloc), "mc.pr.1",
                          VG_(free));

   // Convert the chunks into loss records, merging them where appropriate.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk*     ch = lc_chunks[i];
      LC_Extra*     ex = &(lc_extras)[i];
      LossRecord*   old_lr;
      LossRecordKey lrkey;
      lrkey.state        = ex->state;
      lrkey.allocated_at = ch->where;

      old_lr = VG_(OSetGen_Lookup)(lr_table, &lrkey);
      if (old_lr) {
         // We found an existing loss record matching this chunk.  Update the
         // loss record's details in-situ.  This is safe because we don't
         // change the elements used as the OSet key.
         old_lr->szB          += ch->szB;
         old_lr->indirect_szB += ex->indirect_szB;
         old_lr->num_blocks++;
      } else {
         // No existing loss record matches this chunk.  Create a new loss
         // record, initialise it from the chunk, and insert it into lr_table.
         lr = VG_(OSetGen_AllocNode)(lr_table, sizeof(LossRecord));
         lr->key          = lrkey;
         lr->szB          = ch->szB;
         lr->indirect_szB = ex->indirect_szB;
         lr->num_blocks   = 1;
         VG_(OSetGen_Insert)(lr_table, lr);
      }
   }
   n_lossrecords = VG_(OSetGen_Size)(lr_table);

   // Create an array of pointers to the loss records.
   lr_array = VG_(malloc)("mc.pr.2", n_lossrecords * sizeof(LossRecord*));
   i = 0;
   VG_(OSetGen_ResetIter)(lr_table);
   while ( (lr = VG_(OSetGen_Next)(lr_table)) ) {
      lr_array[i++] = lr;
   }
   tl_assert(i == n_lossrecords);

   // Sort the array by loss record sizes.
   VG_(ssort)(lr_array, n_lossrecords, sizeof(LossRecord*),
              cmp_LossRecords);

   // Zero the summary counts before accumulating.
   MC_(blocks_leaked)     = MC_(bytes_leaked)     = 0;
   MC_(blocks_indirect)   = MC_(bytes_indirect)   = 0;
   MC_(blocks_dubious)    = MC_(bytes_dubious)    = 0;
   MC_(blocks_reachable)  = MC_(bytes_reachable)  = 0;
   MC_(blocks_suppressed) = MC_(bytes_suppressed) = 0;

   // Print the loss records (in size order) and collect summary stats.
   for (i = 0; i < n_lossrecords; i++) {
      Bool count_as_error, print_record;
      // Rules for printing:
      // - We don't show suppressed loss records ever (and that's controlled
      //   within the error manager).
      // - We show non-suppressed loss records that are not "reachable" if
      //   --leak-check=yes.
      // - We show all non-suppressed loss records if --leak-check=yes and
      //   --show-reachable=yes.
      //
      // Nb: here "reachable" means Reachable *or* IndirectLeak;  note that
      // this is different to "still reachable" used elsewhere because it
      // includes indirectly lost blocks!
      lr = lr_array[i];
      print_record = is_full_check &&
                     ( MC_(clo_show_reachable) ||
                       Unreached == lr->key.state ||
                       Possible  == lr->key.state );
      // We don't count leaks as errors with --leak-check=summary.
      // Otherwise you can get high error counts with few or no error
      // messages, which can be confusing.  Also, you could argue that
      // indirect leaks should be counted as errors, but it seems better to
      // make the counting criteria similar to the printing criteria.  So we
      // don't count them.
      count_as_error = is_full_check &&
                       ( Unreached == lr->key.state ||
                         Possible  == lr->key.state );
      is_suppressed =
         MC_(record_leak_error) ( tid, i+1, n_lossrecords, lr, print_record,
                                  count_as_error );

      if (is_suppressed) {
         MC_(blocks_suppressed) += lr->num_blocks;
         MC_(bytes_suppressed)  += lr->szB;

      } else if (Unreached == lr->key.state) {
         MC_(blocks_leaked) += lr->num_blocks;
         MC_(bytes_leaked)  += lr->szB;

      } else if (IndirectLeak == lr->key.state) {
         MC_(blocks_indirect) += lr->num_blocks;
         MC_(bytes_indirect)  += lr->szB;

      } else if (Possible == lr->key.state) {
         MC_(blocks_dubious) += lr->num_blocks;
         MC_(bytes_dubious)  += lr->szB;

      } else if (Reachable == lr->key.state) {
         MC_(blocks_reachable) += lr->num_blocks;
         MC_(bytes_reachable)  += lr->szB;

      } else {
         VG_(tool_panic)("unknown loss mode");
      }
   }

   if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
      VG_(umsg)("LEAK SUMMARY:\n");
      VG_(umsg)("   definitely lost: %'lu bytes in %'lu blocks\n",
                MC_(bytes_leaked), MC_(blocks_leaked) );
      VG_(umsg)("   indirectly lost: %'lu bytes in %'lu blocks\n",
                MC_(bytes_indirect), MC_(blocks_indirect) );
      VG_(umsg)("     possibly lost: %'lu bytes in %'lu blocks\n",
                MC_(bytes_dubious), MC_(blocks_dubious) );
      VG_(umsg)("   still reachable: %'lu bytes in %'lu blocks\n",
                MC_(bytes_reachable), MC_(blocks_reachable) );
      VG_(umsg)("        suppressed: %'lu bytes in %'lu blocks\n",
                MC_(bytes_suppressed), MC_(blocks_suppressed) );
      if (!is_full_check &&
          (MC_(blocks_leaked) + MC_(blocks_indirect) +
           MC_(blocks_dubious) + MC_(blocks_reachable)) > 0) {
         VG_(umsg)("Rerun with --leak-check=full to see details "
                   "of leaked memory\n");
      }
      if (is_full_check &&
          MC_(blocks_reachable) > 0 && !MC_(clo_show_reachable))
      {
         VG_(umsg)("Reachable blocks (those to which a pointer "
                   "was found) are not shown.\n");
         VG_(umsg)("To see them, rerun with: --leak-check=full "
                   "--show-reachable=yes\n");
      }
   }
}

/*------------------------------------------------------------*/
/*--- Top-level entry point.                               ---*/
/*------------------------------------------------------------*/

void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckMode mode )
{
   Int i, j;

   tl_assert(mode != LC_Off);

   // Get the chunks, stop if there were none.
   lc_chunks = find_active_chunks(&lc_n_chunks);
   if (lc_n_chunks == 0) {
      tl_assert(lc_chunks == NULL);
      if (VG_(clo_verbosity) >= 1 && !VG_(clo_xml)) {
         VG_(umsg)("All heap blocks were freed -- no leaks are possible\n");
      }
      return;
   }

   // Sort the array so blocks are in ascending order in memory.
   VG_(ssort)(lc_chunks, lc_n_chunks, sizeof(VgHashNode*), compare_MC_Chunks);

   // Sanity check -- make sure they're in order.
   for (i = 0; i < lc_n_chunks-1; i++) {
      tl_assert( lc_chunks[i]->data <= lc_chunks[i+1]->data);
   }

   // Sanity check -- make sure they don't overlap.  The one exception is that
   // we allow a MALLOCLIKE block to sit entirely within a malloc() block.
   // This is for bug 100628.  If this occurs, we ignore the malloc() block
   // for leak-checking purposes.  This is a hack and probably should be done
   // better, but at least it's consistent with mempools (which are treated
   // like this in find_active_chunks).  Mempools have a separate VgHashTable
   // for mempool chunks, but if custom-allocated blocks are put in a separate
   // table from normal heap blocks it makes free-mismatch checking more
   // difficult.
   //
   // If this check fails, it probably means that the application
   // has done something stupid with VALGRIND_MALLOCLIKE_BLOCK client
   // requests, eg. has made overlapping requests (which are
   // nonsensical), or used VALGRIND_MALLOCLIKE_BLOCK for stack locations;
   // again nonsensical.
   //
   for (i = 0; i < lc_n_chunks-1; i++) {
      MC_Chunk* ch1 = lc_chunks[i];
      MC_Chunk* ch2 = lc_chunks[i+1];

      Addr start1    = ch1->data;
      Addr start2    = ch2->data;
      Addr end1      = ch1->data + ch1->szB - 1;
      Addr end2      = ch2->data + ch2->szB - 1;
      Bool isCustom1 = ch1->allockind == MC_AllocCustom;
      Bool isCustom2 = ch2->allockind == MC_AllocCustom;

      if (end1 < start2) {
         // Normal case - no overlap.

      // We used to allow exact duplicates, I'm not sure why.  --njn
      //} else if (start1 == start2 && end1 == end2) {
      //   // Degenerate case: exact duplicates.

      } else if (start1 >= start2 && end1 <= end2 && isCustom1 && !isCustom2) {
         // Block i is MALLOCLIKE and entirely within block i+1.
         // Remove block i+1.
         for (j = i+1; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else if (start2 >= start1 && end2 <= end1 && isCustom2 && !isCustom1) {
         // Block i+1 is MALLOCLIKE and entirely within block i.
         // Remove block i.
         for (j = i; j < lc_n_chunks-1; j++) {
            lc_chunks[j] = lc_chunks[j+1];
         }
         lc_n_chunks--;

      } else {
         VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
                   start1, end1, start2, end2);
         VG_(umsg)("This is usually caused by using VALGRIND_MALLOCLIKE_BLOCK\n");
         VG_(umsg)("in an inappropriate way.\n");
         tl_assert (0);
      }
   }

   // Initialise lc_extras.
   lc_extras = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(LC_Extra) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_extras[i].state        = Unreached;
      lc_extras[i].indirect_szB = 0;
   }

   // Initialise lc_markstack.
   lc_markstack = VG_(malloc)( "mc.dml.2", lc_n_chunks * sizeof(Int) );
   for (i = 0; i < lc_n_chunks; i++) {
      lc_markstack[i] = -1;
   }
   lc_markstack_top = -1;

   // Verbosity.
   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)( "Searching for pointers to %'d not-freed blocks\n",
                 lc_n_chunks );
   }

   // Scan the memory root-set, pushing onto the mark stack any blocks
   // pointed to.
   {
      Int   n_seg_starts;
      Addr* seg_starts = VG_(get_segment_starts)( &n_seg_starts );

      tl_assert(seg_starts && n_seg_starts > 0);

      lc_scanned_szB = 0;

      // VG_(am_show_nsegments)( 0, "leakcheck");
      for (i = 0; i < n_seg_starts; i++) {
         SizeT seg_size;
         NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
         tl_assert(seg);

         if (seg->kind != SkFileC && seg->kind != SkAnonC) continue;
         if (!(seg->hasR && seg->hasW))                    continue;
         if (seg->isCH)                                    continue;

         // Don't poke around in device segments as this may cause
         // hangs.  Exclude /dev/zero just in case someone allocated
         // memory by explicitly mapping /dev/zero.
         if (seg->kind == SkFileC
             && (VKI_S_ISCHR(seg->mode) || VKI_S_ISBLK(seg->mode))) {
            HChar* dev_name = VG_(am_get_filename)( (NSegment*)seg );
            if (dev_name && 0 == VG_(strcmp)(dev_name, "/dev/zero")) {
               // Don't skip /dev/zero.
            } else {
               // Skip this device mapping.
               continue;
            }
         }

         if (0)
            VG_(printf)("ACCEPT %2d  %#lx %#lx\n", i, seg->start, seg->end);

         // Scan the segment.  We use -1 for the clique number, because this
         // is a root-set scan, not a clique-forming scan.
         seg_size = seg->end - seg->start + 1;
         if (VG_(clo_verbosity) > 2) {
            VG_(message)(Vg_DebugMsg,
                         "  Scanning root segment: %#lx..%#lx (%lu)\n",
                         seg->start, seg->end, seg_size);
         }
         lc_scan_memory(seg->start, seg_size, /*is_prior_definite*/True, -1);
      }
   }

   // Scan GP registers for chunk pointers.
   VG_(apply_to_GP_regs)(lc_push_if_a_chunk_ptr_register);

   // Process the pushed blocks.  After this, every block that is reachable
   // from the root-set has been traced.
   lc_process_markstack(/*clique*/-1);

   if (VG_(clo_verbosity) > 1 && !VG_(clo_xml)) {
      VG_(umsg)("Checked %'lu bytes\n", lc_scanned_szB);
   }

   // Trace all the leaked blocks to determine which are directly leaked and
   // which are indirectly leaked.  For each Unreached block, push it onto
   // the mark stack, and find all the as-yet-Unreached blocks reachable
   // from it.  These form a clique and are marked IndirectLeak, and their
   // size is added to the clique leader's indirect size.  If one of the
   // found blocks was itself a clique leader (from a previous clique), then
   // the cliques are merged.
   for (i = 0; i < lc_n_chunks; i++) {
      MC_Chunk* ch = lc_chunks[i];
      LC_Extra* ex = &(lc_extras[i]);

      if (VG_DEBUG_CLIQUE)
         VG_(printf)("cliques: %d at %#lx -> Loss state %d\n",
                     i, ch->data, ex->state);

      tl_assert(lc_markstack_top == -1);

      if (ex->state == Unreached) {
         if (VG_DEBUG_CLIQUE)
            VG_(printf)("%d: gathering clique %#lx\n", i, ch->data);

         // Push this Unreached block onto the stack and process it.
         lc_push(i, ch);
         lc_process_markstack(i);

         tl_assert(lc_markstack_top == -1);
         tl_assert(ex->state == Unreached);
      }
   }

   print_results( tid, ( mode == LC_Full ? True : False ) );

   VG_(free) ( lc_chunks );
   VG_(free) ( lc_extras );
   VG_(free) ( lc_markstack );
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/