//--------------------------------------------------------------------//
//--- Massif: a heap profiling tool.                     ms_main.c ---//
//--------------------------------------------------------------------//
   This file is part of Massif, a Valgrind tool for profiling memory
   usage of programs.
9 Copyright (C) 2003-2010 Nicholas Nethercote
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.
27 The GNU General Public License is contained in the file COPYING.
30 //---------------------------------------------------------------------------
32 //---------------------------------------------------------------------------
33 // Todo -- nice, but less critical:
34 // - do a graph-drawing test
35 // - make file format more generic. Obstacles:
36 // - unit prefixes are not generic
37 // - preset column widths for stats are not generic
38 // - preset column headers are not generic
39 // - "Massif arguments:" line is not generic
40 // - do snapshots on client requests
41 // - (Michael Meeks): have an interactive way to request a dump
42 // (callgrind_control-style)
44 // - "show me the extra allocations since the last snapshot"
45 // - "start/stop logging" (eg. quickly skip boring bits)
46 // - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
47 // Give each graph a title. (try to do it generically!)
// - allow truncation of long fn names if the exact line number is
49 // identified? [hmm, could make getting the name of alloc-fns more
50 // difficult] [could dump full names to file, truncate in ms_print]
51 // - make --show-below-main=no work
52 // - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
53 // don't work in a .valgrindrc file or in $VALGRIND_OPTS.
54 // m_commandline.c:add_args_from_string() needs to respect single quotes.
55 // - With --stack=yes, want to add a stack trace for detailed snapshots so
56 // it's clear where/why the peak is occurring. (Mattieu Castet) Also,
57 // possibly useful even with --stack=no? (Andi Yin)
60 // - To run the benchmarks:
62 // perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
63 // time valgrind --tool=massif --depth=100 konqueror
65 // The other benchmarks don't do much allocation, and so give similar speeds
68 // Timing results on 'nevermore' (njn's machine) as of r7013:
70 // heap 0.53s ma:12.4s (23.5x, -----)
71 // tinycc 0.46s ma: 4.9s (10.7x, -----)
72 // many-xpts 0.08s ma: 2.0s (25.0x, -----)
73 // konqueror 29.6s real 0:21.0s user
75 // [Introduction of --time-unit=i as the default slowed things down by
78 // - get_XCon accounts for about 9% of konqueror startup time. Try
79 // keeping XPt children sorted by 'ip' and use binary search in get_XCon.
80 // Requires factoring out binary search code from various places into a
81 // VG_(bsearch) function.
83 // Todo -- low priority:
84 // - In each XPt, record both bytes and the number of allocations, and
85 // possibly the global number of allocations.
86 // - (Andy Lin) Give a stack trace on detailed snapshots?
87 // - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
88 // than a certain size! Because: "linux's malloc allows to set a
89 // MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
90 // be handled directly by the kernel, and are guaranteed to be returned to
//   the system when freed.  So we needed to profile only blocks below this
//   threshold."
94 // File format working notes:
97 desc: --heap-admin=foo
116 n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
117 n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
118 n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
119 n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
120 n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
121 n1: 5 0x8049821: (within /bin/date)
122 n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)
125 n_events: n time(ms) total(B) useful-heap(B) admin-heap(B) stacks(B)
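 [An illustrative decoding, inferred from the sample above rather than from
 a format spec: each tree line is "n<K>: <bytes> <ip>: <fn> (in <obj>)",
 where K is the node's number of children.  So
 "n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)" is a node accounting
 for 5 bytes, with one child, at code address 0x278871 inside setlocale.]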
133 - each snapshot specifies an x-axis value and one or more y-axis values.
134 - can display the y-axis values separately if you like
- the connection between snapshots and trees can be completely separated.
138 - how to specify and scale/abbreviate units on axes?
139 - how to combine multiple values into the y-axis?
--------------------------------------------------------------------------------
Command: date
142 Massif arguments: --heap-admin=foo
143 ms_print arguments: massif.out
144 --------------------------------------------------------------------------------
 [ASCII-art graph of memory usage over time, as drawn by ms_print, elided;
  the x-axis runs from 0 to 713 ms]
152 Number of snapshots: 50
153 Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]
--------------------------------------------------------------------------------
  n       time(ms)         total(B)   useful-heap(B) admin-heap(B)    stacks(B)
--------------------------------------------------------------------------------
  0              0                0                0             0            0
158 100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
159 ->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
162 //---------------------------------------------------------------------------
164 #include "pub_tool_basics.h"
165 #include "pub_tool_vki.h"
166 #include "pub_tool_aspacemgr.h"
167 #include "pub_tool_debuginfo.h"
168 #include "pub_tool_hashtable.h"
169 #include "pub_tool_libcbase.h"
170 #include "pub_tool_libcassert.h"
171 #include "pub_tool_libcfile.h"
172 #include "pub_tool_libcprint.h"
173 #include "pub_tool_libcproc.h"
174 #include "pub_tool_machine.h"
175 #include "pub_tool_mallocfree.h"
176 #include "pub_tool_options.h"
177 #include "pub_tool_replacemalloc.h"
178 #include "pub_tool_stacktrace.h"
179 #include "pub_tool_tooliface.h"
180 #include "pub_tool_xarray.h"
181 #include "pub_tool_clientstate.h"
183 #include "valgrind.h" // For {MALLOC,FREE}LIKE_BLOCK
//------------------------------------------------------------//
//--- Overview of operation                                ---//
//------------------------------------------------------------//
189 // The size of the stacks and heap is tracked. The heap is tracked in a lot
190 // of detail, enough to tell how many bytes each line of code is responsible
191 // for, more or less. The main data structure is a tree representing the
192 // call tree beneath all the allocation functions like malloc().
194 // "Snapshots" are recordings of the memory usage. There are two basic
196 // - Normal: these record the current time, total memory size, total heap
197 // size, heap admin size and stack size.
198 // - Detailed: these record those things in a normal snapshot, plus a very
199 // detailed XTree (see below) indicating how the heap is structured.
// Snapshots are taken every so often.  There are two storage classes of
// snapshots:
203 // - Temporary: Massif does a temporary snapshot every so often. The idea
204 // is to always have a certain number of temporary snapshots around. So
205 // we take them frequently to begin with, but decreasingly often as the
206 // program continues to run. Also, we remove some old ones after a while.
207 // Overall it's a kind of exponential decay thing. Most of these are
208 // normal snapshots, a small fraction are detailed snapshots.
209 // - Permanent: Massif takes a permanent (detailed) snapshot in some
210 // circumstances. They are:
211 // - Peak snapshot: When the memory usage peak is reached, it takes a
212 // snapshot. It keeps this, unless the peak is subsequently exceeded,
213 // in which case it will overwrite the peak snapshot.
214 // - User-requested snapshots: These are done in response to client
215 // requests. They are always kept.
217 // Used for printing things when clo_verbosity > 1.
#define VERB(verb, format, args...) \
   if (VG_(clo_verbosity) > verb) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }
223 // Used for printing stats when clo_stats == True.
#define STATS(format, args...) \
   if (VG_(clo_stats)) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }
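
// For example (an illustrative sketch, not code appearing elsewhere in this
// file): the first line below prints a "Massif: "-prefixed message at
// --verbosity levels above 1; the second prints only when --stats=yes.
// ('n_culled' is hypothetical; 'n_heap_allocs' is defined below.)
//
//    VERB(1, "culled %d snapshots\n", n_culled);
//    STATS("heap allocs: %u\n", n_heap_allocs);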
229 //------------------------------------------------------------//
230 //--- Statistics ---//
231 //------------------------------------------------------------//
233 // Konqueror startup, to give an idea of the numbers involved with a biggish
234 // program, with default depth:
//  - 310,000 allocations
//  -  15,000 XPts        (800,000 XPts at --depth=40)
242 static UInt n_heap_allocs = 0;
243 static UInt n_heap_reallocs = 0;
244 static UInt n_heap_frees = 0;
245 static UInt n_ignored_heap_allocs = 0;
246 static UInt n_ignored_heap_frees = 0;
247 static UInt n_ignored_heap_reallocs = 0;
248 static UInt n_stack_allocs = 0;
249 static UInt n_stack_frees = 0;
250 static UInt n_xpts = 0;
251 static UInt n_xpt_init_expansions = 0;
252 static UInt n_xpt_later_expansions = 0;
253 static UInt n_sxpt_allocs = 0;
254 static UInt n_sxpt_frees = 0;
255 static UInt n_skipped_snapshots = 0;
256 static UInt n_real_snapshots = 0;
257 static UInt n_detailed_snapshots = 0;
258 static UInt n_peak_snapshots = 0;
259 static UInt n_cullings = 0;
260 static UInt n_XCon_redos = 0;
//------------------------------------------------------------//
//--- Globals                                              ---//
//------------------------------------------------------------//
// Number of guest instructions executed so far.  Only used with
// --time-unit=i.
268 static Long guest_instrs_executed = 0;
270 static SizeT heap_szB = 0; // Live heap size
271 static SizeT heap_extra_szB = 0; // Live heap extra size -- slop + admin bytes
272 static SizeT stacks_szB = 0; // Live stacks size
274 // This is the total size from the current peak snapshot, or 0 if no peak
275 // snapshot has been taken yet.
276 static SizeT peak_snapshot_total_szB = 0;
278 // Incremented every time memory is allocated/deallocated, by the
279 // allocated/deallocated amount; includes heap, heap-admin and stack
280 // memory. An alternative to milliseconds as a unit of program "time".
281 static ULong total_allocs_deallocs_szB = 0;
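//
// For example: with --time-unit=B, a malloc(100) followed by a free() of
// that block advances "time" by 200 bytes (plus any admin and slop bytes,
// which are counted with each event too), since the allocation and the
// deallocation each add their absolute size to this counter.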
283 // We don't start taking snapshots until the first basic block is executed,
// rather than doing it in ms_post_clo_init (which is the obvious spot), for
// two reasons:
// - It lets us ignore stack events prior to that, because they're not
//   really proper ones and would just screw things up.
288 // - Because there's still some core initialisation to do, and so there
289 // would be an artificial time gap between the first and second snapshots.
291 static Bool have_started_executing_code = False;
293 //------------------------------------------------------------//
294 //--- Alloc fns ---//
295 //------------------------------------------------------------//
297 static XArray* alloc_fns;
298 static XArray* ignore_fns;
static void init_alloc_fns(void)
{
   // Create the list, and add the default elements.
303 alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
304 VG_(free), sizeof(Char*));
305 #define DO(x) { Char* s = x; VG_(addToXA)(alloc_fns, &s); }
307 // Ordered roughly according to (presumed) frequency.
308 // Nb: The C++ "operator new*" ones are overloadable. We include them
309 // always anyway, because even if they're overloaded, it would be a
   // prodigiously stupid overloading that caused them to not allocate
   // memory.
313 // XXX: because we don't look at the first stack entry (unless it's a
314 // custom allocation) there's not much point to having all these alloc
315 // functions here -- they should never appear anywhere (I think?) other
316 // than the top stack entry. The only exceptions are those that in
317 // vg_replace_malloc.c are partly or fully implemented in terms of another
318 // alloc function: realloc (which uses malloc); valloc,
   // malloc_zone_valloc, posix_memalign and memalign_common (which use
   // memalign).
323 DO("__builtin_new" );
324 DO("operator new(unsigned)" );
325 DO("operator new(unsigned long)" );
326 DO("__builtin_vec_new" );
327 DO("operator new[](unsigned)" );
328 DO("operator new[](unsigned long)" );
332 DO("posix_memalign" );
334 DO("operator new(unsigned, std::nothrow_t const&)" );
335 DO("operator new[](unsigned, std::nothrow_t const&)" );
336 DO("operator new(unsigned long, std::nothrow_t const&)" );
337 DO("operator new[](unsigned long, std::nothrow_t const&)");
338 #if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
339 DO("malloc_common" );
340 DO("calloc_common" );
341 DO("realloc_common" );
342 DO("memalign_common" );
343 #elif defined(VGO_darwin)
344 DO("malloc_zone_malloc" );
345 DO("malloc_zone_calloc" );
346 DO("malloc_zone_realloc" );
347 DO("malloc_zone_memalign" );
348 DO("malloc_zone_valloc" );
static void init_ignore_fns(void)
{
   // Create the (empty) list.
   ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
                           VG_(free), sizeof(Char*));
}
359 // Determines if the named function is a member of the XArray.
static Bool is_member_fn(XArray* fns, Char* fnname)
{
   Char** fn_ptr;
   Int    i;

   // Nb: It's a linear search through the list, because we're comparing
   // strings rather than pointers to strings.
   // Nb: This gets called a lot.  It was an OSet, but they're quite slow to
   // iterate through so it wasn't a good choice.
   for (i = 0; i < VG_(sizeXA)(fns); i++) {
      fn_ptr = VG_(indexXA)(fns, i);
      if (VG_STREQ(fnname, *fn_ptr))
         return True;
   }
   return False;
}
378 //------------------------------------------------------------//
379 //--- Command line args ---//
380 //------------------------------------------------------------//
382 #define MAX_DEPTH 200
384 typedef enum { TimeI, TimeMS, TimeB } TimeUnit;
static Char* TimeUnit_to_string(TimeUnit time_unit)
{
   switch (time_unit) {
   case TimeI:  return "i";
   case TimeMS: return "ms";
   case TimeB:  return "B";
   default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
   }
}
396 static Bool clo_heap = True;
397 // clo_heap_admin is deliberately a word-sized type. At one point it was
398 // a UInt, but this caused problems on 64-bit machines when it was
399 // multiplied by a small negative number and then promoted to a
400 // word-sized type -- it ended up with a value of 4.2 billion. Sigh.
401 static SSizeT clo_heap_admin = 8;
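// An illustrative sketch of that promotion bug (not Massif code; assumes a
// 64-bit machine where Long is 64 bits):
//
//    UInt admin = 8;
//    Long delta = admin * -1;   // -1 converts to UInt 0xFFFFFFFF; the
//                               // product wraps to 0xFFFFFFF8 (~4.2
//                               // billion), which then widens into 'delta'.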
402 static Bool clo_stacks = False;
403 static Int clo_depth = 30;
404 static double clo_threshold = 1.0; // percentage
405 static double clo_peak_inaccuracy = 1.0; // percentage
406 static Int clo_time_unit = TimeI;
407 static Int clo_detailed_freq = 10;
408 static Int clo_max_snapshots = 100;
409 static Char* clo_massif_out_file = "massif.out.%p";
411 static XArray* args_for_massif;
static Bool ms_process_cmd_line_option(Char* arg)
{
   Char* tmp_str;
417 // Remember the arg for later use.
418 VG_(addToXA)(args_for_massif, &arg);
420 if VG_BOOL_CLO(arg, "--heap", clo_heap) {}
421 else if VG_BOOL_CLO(arg, "--stacks", clo_stacks) {}
423 else if VG_BINT_CLO(arg, "--heap-admin", clo_heap_admin, 0, 1024) {}
424 else if VG_BINT_CLO(arg, "--depth", clo_depth, 1, MAX_DEPTH) {}
426 else if VG_DBL_CLO(arg, "--threshold", clo_threshold) {}
428 else if VG_DBL_CLO(arg, "--peak-inaccuracy", clo_peak_inaccuracy) {}
430 else if VG_BINT_CLO(arg, "--detailed-freq", clo_detailed_freq, 1, 10000) {}
431 else if VG_BINT_CLO(arg, "--max-snapshots", clo_max_snapshots, 10, 1000) {}
433 else if VG_XACT_CLO(arg, "--time-unit=i", clo_time_unit, TimeI) {}
434 else if VG_XACT_CLO(arg, "--time-unit=ms", clo_time_unit, TimeMS) {}
435 else if VG_XACT_CLO(arg, "--time-unit=B", clo_time_unit, TimeB) {}
437 else if VG_STR_CLO(arg, "--alloc-fn", tmp_str) {
438 VG_(addToXA)(alloc_fns, &tmp_str);
440 else if VG_STR_CLO(arg, "--ignore-fn", tmp_str) {
441 VG_(addToXA)(ignore_fns, &tmp_str);
443 else if VG_STR_CLO(arg, "--massif-out-file", clo_massif_out_file) {}
446 return VG_(replacement_malloc_process_cmd_line_option)(arg);
static void ms_print_usage(void)
{
   VG_(printf)(
"    --heap=no|yes             profile heap blocks [yes]\n"
"    --heap-admin=<size>       average admin bytes per heap block;\n"
"                              ignored if --heap=no [8]\n"
"    --stacks=no|yes           profile stack(s) [no]\n"
"    --depth=<number>          depth of contexts [30]\n"
"    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
"    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
"    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
"    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
"    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
"                              or heap bytes alloc'd/dealloc'd [i]\n"
"    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
"    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
"    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
   );
}

static void ms_print_debug_usage(void)
{
   VG_(printf)(
"    (none)\n"
   );
}
479 //------------------------------------------------------------//
480 //--- XPts, XTrees and XCons ---//
481 //------------------------------------------------------------//
483 // An XPt represents an "execution point", ie. a code address. Each XPt is
484 // part of a tree of XPts (an "execution tree", or "XTree"). The details of
485 // the heap are represented by a single XTree.
// The root of the tree is 'alloc_xpt', which represents all allocation
// functions, eg:
489 // - malloc/calloc/realloc/memalign/new/new[];
490 // - user-specified allocation functions (using --alloc-fn);
491 // - custom allocation (MALLOCLIKE) points
492 // It's a bit of a fake XPt (ie. its 'ip' is zero), and is only used because
493 // it makes the code simpler.
495 // Any child of 'alloc_xpt' is called a "top-XPt". The XPts at the bottom
496 // of an XTree (leaf nodes) are "bottom-XPTs".
498 // Each path from a top-XPt to a bottom-XPt through an XTree gives an
499 // execution context ("XCon"), ie. a stack trace. (And sub-paths represent
500 // stack sub-traces.) The number of XCons in an XTree is equal to the
501 // number of bottom-XPTs in that XTree.
//      alloc_xpt           XTrees are bi-directional.
//        | ^
//        v |
//     > parent <           Example: if child1() calls parent() and child2()
//    /    |     \          also calls parent(), and parent() calls malloc(),
//   |    / \     |         the XTree will look like this.
//   |   v   v    |
//  child1       child2
512 // (Note that malformed stack traces can lead to difficulties. See the
513 // comment at the bottom of get_XCon.)
515 // XTrees and XPts are mirrored by SXTrees and SXPts, where the 'S' is short
516 // for "saved". When the XTree is duplicated for a snapshot, we duplicate
517 // it as an SXTree, which is similar but omits some things it does not need,
518 // and aggregates up insignificant nodes. This is important as an SXTree is
519 // typically much smaller than an XTree.
521 // XXX: make XPt and SXPt extensible arrays, to avoid having to do two
522 // allocations per Pt.
typedef struct _XPt XPt;
struct _XPt {
   Addr  ip;              // code address

   // Bottom-XPts: space for the precise context.
   // Other XPts:  space of all the descendent bottom-XPts.
   // Nb: this value goes up and down as the program executes.
   SizeT szB;

   XPt*  parent;          // pointer to parent XPt

   // n_children and max_children are 32-bit integers.  16-bit integers
   // are too small -- a very big program might have more than 65536
   // allocation points (ie. top-XPts) -- Konqueror starting up has 1800.
   UInt  n_children;      // number of children
   UInt  max_children;    // capacity of children array
   XPt** children;        // pointers to children XPts
};
typedef enum { SigSXPt, InsigSXPt } SXPtTag;

typedef struct _SXPt SXPt;
struct _SXPt {
   SXPtTag tag;
   SizeT szB;             // memory size for the node, be it Sig or Insig
   union {
      // An SXPt representing a single significant code location.  Much like
      // an XPt, minus the fields that aren't necessary.
      struct { Addr ip; UInt n_children; SXPt** children; } Sig;

      // An SXPt representing one or more code locations, all below the
      // significance threshold.
      struct { Int n_xpts; /* number of aggregated XPts */ } Insig;
   };
};
574 // Fake XPt representing all allocation functions like malloc(). Acts as
575 // parent node to all top-XPts.
576 static XPt* alloc_xpt;
578 // Cheap allocation for blocks that never need to be freed. Saves about 10%
579 // for Konqueror startup with --depth=40.
static void* perm_malloc(SizeT n_bytes)
{
   static Addr hp     = 0;    // current heap pointer
   static Addr hp_lim = 0;    // maximum usable byte in current block

   #define SUPERBLOCK_SIZE  (1 << 20)  // 1 MB

   if (hp + n_bytes > hp_lim) {
      hp = (Addr)VG_(am_shadow_alloc)(SUPERBLOCK_SIZE);
      if (0 == hp)
         VG_(out_of_memory_NORETURN)( "massif:perm_malloc",
                                      SUPERBLOCK_SIZE );
      hp_lim = hp + SUPERBLOCK_SIZE - 1;
   }

   hp += n_bytes;

   return (void*)(hp - n_bytes);
}
static XPt* new_XPt(Addr ip, XPt* parent)
{
   // XPts are never freed, so we can use perm_malloc to allocate them.
   // Note that we cannot use perm_malloc for the 'children' array, because
   // that needs to be resizable.
   XPt* xpt    = perm_malloc(sizeof(XPt));
   xpt->ip     = ip;
   xpt->szB    = 0;
   xpt->parent = parent;

   // We don't initially allocate any space for children.  We let that
   // happen on demand.  Many XPts (ie. all the bottom-XPts) don't have any
   // children anyway.
   xpt->n_children   = 0;
   xpt->max_children = 0;
   xpt->children     = NULL;

   // Update statistics.
   n_xpts++;

   return xpt;
}
static void add_child_xpt(XPt* parent, XPt* child)
{
   // Expand 'children' if necessary.
   tl_assert(parent->n_children <= parent->max_children);
   if (parent->n_children == parent->max_children) {
      if (0 == parent->max_children) {
         parent->max_children = 4;
         parent->children = VG_(malloc)( "ms.main.acx.1",
                                         parent->max_children * sizeof(XPt*) );
         n_xpt_init_expansions++;
      } else {
         parent->max_children *= 2;    // Double size
         parent->children = VG_(realloc)( "ms.main.acx.2",
                                          parent->children,
                                          parent->max_children * sizeof(XPt*) );
         n_xpt_later_expansions++;
      }
   }

   // Insert new child XPt in parent's children list.
   parent->children[ parent->n_children++ ] = child;
}
646 // Reverse comparison for a reverse sort -- biggest to smallest.
static Int SXPt_revcmp_szB(void* n1, void* n2)
{
   SXPt* sxpt1 = *(SXPt**)n1;
   SXPt* sxpt2 = *(SXPt**)n2;
   return ( sxpt1->szB < sxpt2->szB ?  1
          : sxpt1->szB > sxpt2->szB ? -1
          :                            0 );
}
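
// A usage sketch: the snapshot-printing code later in the file sorts each
// significant SXPt's children biggest-first with this comparator; the
// exact VG_(ssort) call shown here is an assumption, not a quote:
//
//    VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children, sizeof(SXPt*),
//               SXPt_revcmp_szB);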
656 //------------------------------------------------------------//
657 //--- XTree Operations ---//
658 //------------------------------------------------------------//
660 // Duplicates an XTree as an SXTree.
static SXPt* dup_XTree(XPt* xpt, SizeT total_szB)
{
   Int   i, j, n_sig_children, n_insig_children, n_child_sxpts;
   SizeT sig_child_threshold_szB;
   SXPt* sxpt;
667 // Number of XPt children Action for SXPT
668 // ------------------ ---------------
669 // 0 sig, 0 insig alloc 0 children
670 // N sig, 0 insig alloc N children, dup all
671 // N sig, M insig alloc N+1, dup first N, aggregate remaining M
672 // 0 sig, M insig alloc 1, aggregate M
674 // Work out how big a child must be to be significant. If the current
675 // total_szB is zero, then we set it to 1, which means everything will be
676 // judged insignificant -- this is sensible, as there's no point showing
677 // any detail for this case. Unless they used --threshold=0, in which
678 // case we show them everything because that's what they asked for.
680 // Nb: We do this once now, rather than once per child, because if we do
681 // that the cost of all the divisions adds up to something significant.
   if (0 == total_szB && 0 != clo_threshold) {
      sig_child_threshold_szB = 1;
   } else {
      sig_child_threshold_szB = (SizeT)((total_szB * clo_threshold) / 100);
   }
   // How many children are significant?  And do we need an aggregate SXPt?
   n_sig_children = 0;
   for (i = 0; i < xpt->n_children; i++) {
      if (xpt->children[i]->szB >= sig_child_threshold_szB) {
         n_sig_children++;
      }
   }
   n_insig_children = xpt->n_children - n_sig_children;
   n_child_sxpts = n_sig_children + ( n_insig_children > 0 ? 1 : 0 );
698 // Duplicate the XPt.
   sxpt      = VG_(malloc)("ms.main.dX.1", sizeof(SXPt));
   n_sxpt_allocs++;
   sxpt->tag = SigSXPt;
   sxpt->szB = xpt->szB;
   sxpt->Sig.ip = xpt->ip;
   sxpt->Sig.n_children = n_child_sxpts;
706 // Create the SXPt's children.
707 if (n_child_sxpts > 0) {
709 SizeT sig_children_szB = 0, insig_children_szB = 0;
710 sxpt->Sig.children = VG_(malloc)("ms.main.dX.2",
711 n_child_sxpts * sizeof(SXPt*));
713 // Duplicate the significant children. (Nb: sig_children_szB +
714 // insig_children_szB doesn't necessarily equal xpt->szB.)
      j = 0;
      for (i = 0; i < xpt->n_children; i++) {
         if (xpt->children[i]->szB >= sig_child_threshold_szB) {
            sxpt->Sig.children[j++] = dup_XTree(xpt->children[i], total_szB);
            sig_children_szB   += xpt->children[i]->szB;
         } else {
            insig_children_szB += xpt->children[i]->szB;
         }
      }
725 // Create the SXPt for the insignificant children, if any, and put it
726 // in the last child entry.
      if (n_insig_children > 0) {
         // Nb: We increment n_sxpt_allocs here ourselves, because creating
         // an Insig SXPt doesn't involve a call to dup_XTree().
         SXPt* insig_sxpt = VG_(malloc)("ms.main.dX.3", sizeof(SXPt));
         n_sxpt_allocs++;
         insig_sxpt->tag = InsigSXPt;
         insig_sxpt->szB = insig_children_szB;
         insig_sxpt->Insig.n_xpts = n_insig_children;
         sxpt->Sig.children[n_sig_children] = insig_sxpt;
      }
   } else {
      sxpt->Sig.children = NULL;
   }

   return sxpt;
}
static void free_SXTree(SXPt* sxpt)
{
   Int i;
   tl_assert(sxpt != NULL);

   switch (sxpt->tag) {
   case SigSXPt:
      // Free all children SXPts, then the children array.
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         free_SXTree(sxpt->Sig.children[i]);
         sxpt->Sig.children[i] = NULL;
      }
      VG_(free)(sxpt->Sig.children);  sxpt->Sig.children = NULL;
      break;

   case InsigSXPt:
      break;

   default: tl_assert2(0, "free_SXTree: unknown SXPt tag");
   }

   // Free the SXPt itself.
   VG_(free)(sxpt);  sxpt = NULL;
   n_sxpt_frees++;
}
770 // Sanity checking: we periodically check the heap XTree with
771 // ms_expensive_sanity_check.
static void sanity_check_XTree(XPt* xpt, XPt* parent)
{
   tl_assert(xpt != NULL);
776 // Check back-pointer.
777 tl_assert2(xpt->parent == parent,
778 "xpt->parent = %p, parent = %p\n", xpt->parent, parent);
780 // Check children counts look sane.
781 tl_assert(xpt->n_children <= xpt->max_children);
   // Unfortunately, xpt's size is not necessarily equal to the sum of xpt's
   // children's sizes.  See comment at the bottom of get_XCon.
}
787 // Sanity checking: we check SXTrees (which are in snapshots) after
// snapshots are created, before they are deleted, and before they are
// written to file.
static void sanity_check_SXTree(SXPt* sxpt)
{
   Int i;
   tl_assert(sxpt != NULL);

   // Check the sum of any children szBs equals the SXPt's szB.  Check the
   // children at the same time.
   switch (sxpt->tag) {
   case SigSXPt: {
      if (sxpt->Sig.n_children > 0) {
         for (i = 0; i < sxpt->Sig.n_children; i++) {
            sanity_check_SXTree(sxpt->Sig.children[i]);
         }
      }
      break;
   }
   case InsigSXPt:
      break;

   default: tl_assert2(0, "sanity_check_SXTree: unknown SXPt tag");
   }
}
815 //------------------------------------------------------------//
816 //--- XCon Operations ---//
817 //------------------------------------------------------------//
// This is the limit on the number of removed alloc-fns that can be in a
// single XCon.
821 #define MAX_OVERESTIMATE 50
822 #define MAX_IPS (MAX_DEPTH + MAX_OVERESTIMATE)
// This is used for various buffers which can hold function names/IP
// description.  Some C++ names can get really long so 1024 isn't big
// enough.
#define BUF_LEN   2048
829 // Determine if the given IP belongs to a function that should be ignored.
static Bool fn_should_be_ignored(Addr ip)
{
   static Char buf[BUF_LEN];
   return
      ( VG_(get_fnname)(ip, buf, BUF_LEN) && is_member_fn(ignore_fns, buf)
      ? True : False );
}
838 // Get the stack trace for an XCon, filtering out uninteresting entries:
839 // alloc-fns and entries above alloc-fns, and entries below main-or-below-main.
840 // Eg: alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
841 // becomes: a / b / main
842 // Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
843 // as an alloc-fn. This is ok.
static
Int get_IPs( ThreadId tid, Bool is_custom_alloc, Addr ips[])
{
   static Char buf[BUF_LEN];
   Int  n_ips, i, n_alloc_fns_removed;
   Int  overestimate;
   Bool redo;
852 // We ask for a few more IPs than clo_depth suggests we need. Then we
853 // remove every entry that is an alloc-fn. Depending on the
854 // circumstances, we may need to redo it all, asking for more IPs.
856 // - If the original stack trace is smaller than asked-for, redo=False
   // - Else if after filtering we have >= clo_depth IPs, redo=False
   // - Else redo=True
859 // In other words, to redo, we'd have to get a stack trace as big as we
860 // asked for and remove more than 'overestimate' alloc-fns.
863 redo = True; // Assume this to begin with.
864 for (overestimate = 3; redo; overestimate += 6) {
865 // This should never happen -- would require MAX_OVERESTIMATE
866 // alloc-fns to be removed from the stack trace.
867 if (overestimate > MAX_OVERESTIMATE)
868 VG_(tool_panic)("get_IPs: ips[] too small, inc. MAX_OVERESTIMATE?");
870 // Ask for more IPs than clo_depth suggests we need.
871 n_ips = VG_(get_StackTrace)( tid, ips, clo_depth + overestimate,
872 NULL/*array to dump SP values in*/,
873 NULL/*array to dump FP values in*/,
874 0/*first_ip_delta*/ );
875 tl_assert(n_ips > 0);
877 // If the original stack trace is smaller than asked-for, redo=False.
878 if (n_ips < clo_depth + overestimate) { redo = False; }
880 // Filter out alloc fns. If it's a non-custom block, we remove the
881 // first entry (which will be one of malloc, __builtin_new, etc)
882 // without looking at it, because VG_(get_fnname) is expensive (it
883 // involves calls to VG_(malloc)/VG_(free)).
884 n_alloc_fns_removed = ( is_custom_alloc ? 0 : 1 );
885 for (i = n_alloc_fns_removed; i < n_ips; i++) {
886 if (VG_(get_fnname)(ips[i], buf, BUF_LEN)) {
            if (is_member_fn(alloc_fns, buf)) {
               n_alloc_fns_removed++;
            } else {
               break;
            }
         } else {
            break;
         }
      }
894 // Remove the alloc fns by shuffling the rest down over them.
895 n_ips -= n_alloc_fns_removed;
896 for (i = 0; i < n_ips; i++) {
897 ips[i] = ips[i + n_alloc_fns_removed];
      }

      // If after filtering we have >= clo_depth IPs, redo=False
      if (n_ips >= clo_depth) {
         redo = False;
         n_ips = clo_depth;      // Ignore any IPs below --depth.
      }
   }
   return n_ips;
}
913 // Gets an XCon and puts it in the tree. Returns the XCon's bottom-XPt.
914 // Unless the allocation should be ignored, in which case we return NULL.
static XPt* get_XCon( ThreadId tid, Bool is_custom_alloc )
{
   static Addr ips[MAX_IPS];
   Int  i;
   XPt* xpt = alloc_xpt;
921 // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
922 Int n_ips = get_IPs(tid, is_custom_alloc, ips);
924 // Should we ignore this allocation? (Nb: n_ips can be zero, eg. if
925 // 'main' is marked as an alloc-fn.)
   if (n_ips > 0 && fn_should_be_ignored(ips[0])) {
      return NULL;
   }
930 // Now do the search/insertion of the XCon.
   for (i = 0; i < n_ips; i++) {
      Addr ip = ips[i];
      UInt ch;
934 // Look for IP in xpt's children.
      // Linear search, ugh -- about 10% of time for konqueror startup.
      // (We tried caching the last result, but it only hit about 4% of the
      // time for konqueror.)
      // Nb: this search hits about 98% of the time for konqueror.
938 for (ch = 0; True; ch++) {
939 if (ch == xpt->n_children) {
940 // IP not found in the children.
941 // Create and add new child XPt, then stop.
942 XPt* new_child_xpt = new_XPt(ip, xpt);
            add_child_xpt(xpt, new_child_xpt);
            xpt = new_child_xpt;
            break;

         } else if (ip == xpt->children[ch]->ip) {
948 // Found the IP in the children, stop.
            xpt = xpt->children[ch];
            break;
         }
      }
   }
955 // [Note: several comments refer to this comment. Do not delete it
956 // without updating them.]
958 // A complication... If all stack traces were well-formed, then the
959 // returned xpt would always be a bottom-XPt. As a consequence, an XPt's
960 // size would always be equal to the sum of its children's sizes, which
961 // is an excellent sanity check.
963 // Unfortunately, stack traces occasionally are malformed, ie. truncated.
964 // This allows a stack trace to be a sub-trace of another, eg. a/b/c is a
   // sub-trace of a/b/c/d.  So we can't assume this xpt is a bottom-XPt;
   // nor can we sanity-check an XPt's size against its children's sizes.
967 // This is annoying, but must be dealt with. (Older versions of Massif
968 // had this assertion in, and it was reported to fail by real users a
   // couple of times.)  Even more annoyingly, I can't come up with a simple
   // test case that exhibits such a malformed stack trace, so I can't
   // regression test it.  Sigh.
973 // However, we can print a warning, so that if it happens (unexpectedly)
974 // in existing regression tests we'll know. Also, it warns users that
975 // the output snapshots may not add up the way they might expect.
   //tl_assert(0 == xpt->n_children); // Must be bottom-XPt
   if (0 != xpt->n_children) {
      static Int n_moans = 0;
      if (n_moans < 3) {
         n_moans++;
         VG_(umsg)(
            "Warning: Malformed stack trace detected.  In Massif's output,\n");
         VG_(umsg)(
            "         the size of an entry's child entries may not sum up\n");
         VG_(umsg)(
            "         to the entry's size as they normally do.\n");
         if (3 == n_moans)
            VG_(umsg)(
            "         (And Massif now won't warn about this again.)\n");
      }
   }
   return xpt;
}
996 // Update 'szB' of every XPt in the XCon, by percolating upwards.
static void update_XCon(XPt* xpt, SSizeT space_delta)
{
   tl_assert(True == clo_heap);
   tl_assert(NULL != xpt);

   if (0 == space_delta)
      return;

   while (xpt != alloc_xpt) {
      if (space_delta < 0) tl_assert(xpt->szB >= -space_delta);
      xpt->szB += space_delta;
      xpt = xpt->parent;
   }
   if (space_delta < 0) tl_assert(alloc_xpt->szB >= -space_delta);
   alloc_xpt->szB += space_delta;
}
1015 //------------------------------------------------------------//
1016 //--- Snapshots ---//
1017 //------------------------------------------------------------//
1019 // Snapshots are done in a way so that we always have a reasonable number of
1020 // them. We start by taking them quickly. Once we hit our limit, we cull
1021 // some (eg. half), and start taking them more slowly. Once we hit the
// limit again, we again cull and then take them even more slowly, and so
// on.
1025 // Time is measured either in i or ms or bytes, depending on the --time-unit
1026 // option. It's a Long because it can exceed 32-bits reasonably easily, and
// because we need to allow negative values to represent unset times.
typedef Long Time;

#define UNUSED_SNAPSHOT_TIME  -333  // A conspicuous negative number.
typedef enum { Normal, Peak, Unused } SnapshotKind;

typedef
   struct {
      SnapshotKind kind;
      Time  time;
      SizeT heap_szB;
      SizeT heap_extra_szB;// Heap slop + admin bytes.
      SizeT stacks_szB;
      SXPt* alloc_sxpt;    // Heap XTree root, if a detailed snapshot,
   }                       // otherwise NULL.
   Snapshot;
1051 static UInt next_snapshot_i = 0; // Index of where next snapshot will go.
1052 static Snapshot* snapshots; // Array of snapshots.
static Bool is_snapshot_in_use(Snapshot* snapshot)
{
   if (Unused == snapshot->kind) {
      // If snapshot is unused, check all the fields are unset.
      tl_assert(snapshot->time           == UNUSED_SNAPSHOT_TIME);
      tl_assert(snapshot->heap_extra_szB == 0);
      tl_assert(snapshot->heap_szB       == 0);
      tl_assert(snapshot->stacks_szB     == 0);
      tl_assert(snapshot->alloc_sxpt     == NULL);
      return False;
   } else {
      tl_assert(snapshot->time != UNUSED_SNAPSHOT_TIME);
      return True;
   }
}
static Bool is_detailed_snapshot(Snapshot* snapshot)
{
   return (snapshot->alloc_sxpt ? True : False);
}
static Bool is_uncullable_snapshot(Snapshot* snapshot)
{
   return &snapshots[0] == snapshot                   // First snapshot
       || &snapshots[next_snapshot_i-1] == snapshot   // Last snapshot
       || snapshot->kind == Peak;                     // Peak snapshot
}
static void sanity_check_snapshot(Snapshot* snapshot)
{
   if (snapshot->alloc_sxpt) {
      sanity_check_SXTree(snapshot->alloc_sxpt);
   }
}
1089 // All the used entries should look used, all the unused ones should be clear.
static void sanity_check_snapshots_array(void)
{
   Int i;
   for (i = 0; i < next_snapshot_i; i++) {
      tl_assert( is_snapshot_in_use( & snapshots[i] ) );
   }
   for ( ; i < clo_max_snapshots; i++) {
      tl_assert( ! is_snapshot_in_use( & snapshots[i] ) );
   }
}
1101 // This zeroes all the fields in the snapshot, but does not free the heap
1102 // XTree if present. It also does a sanity check unless asked not to; we
1103 // can't sanity check at startup when clearing the initial snapshots because
1104 // they're full of junk.
static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
{
   if (do_sanity_check) sanity_check_snapshot(snapshot);
   snapshot->kind           = Unused;
   snapshot->time           = UNUSED_SNAPSHOT_TIME;
   snapshot->heap_extra_szB = 0;
   snapshot->heap_szB       = 0;
   snapshot->stacks_szB     = 0;
   snapshot->alloc_sxpt     = NULL;
}
// This zeroes all the fields in the snapshot, and frees the heap XTree if
// present.
static void delete_snapshot(Snapshot* snapshot)
{
   // Nb: if there's an XTree, we free it after calling clear_snapshot,
   // because clear_snapshot does a sanity check which includes checking the
   // XTree.
   SXPt* tmp_sxpt = snapshot->alloc_sxpt;
   clear_snapshot(snapshot, /*do_sanity_check*/True);
   if (tmp_sxpt) {
      free_SXTree(tmp_sxpt);
   }
}
static void VERB_snapshot(Int verbosity, Char* prefix, Int i)
{
   Snapshot* snapshot = &snapshots[i];
   Char* suffix;
   switch (snapshot->kind) {
   case Peak:   suffix = "p";                                            break;
   case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
   case Unused: suffix = "u";                                            break;
   default:
      tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
   }
   VERB(verbosity, "%s S%s%3d (t:%lld, hp:%ld, ex:%ld, st:%ld)\n",
      prefix, suffix, i,
      snapshot->time,
      snapshot->heap_szB,
      snapshot->heap_extra_szB,
      snapshot->stacks_szB
   );
}
1150 // Cull half the snapshots; we choose those that represent the smallest
1151 // time-spans, because that gives us the most even distribution of snapshots
1152 // over time. (It's possible to lose interesting spikes, however.)
1154 // Algorithm for N snapshots: We find the snapshot representing the smallest
1155 // timeframe, and remove it. We repeat this until (N/2) snapshots are gone.
1156 // We have to do this one snapshot at a time, rather than finding the (N/2)
1157 // smallest snapshots in one hit, because when a snapshot is removed, its
1158 // neighbours immediately cover greater timespans. So it's O(N^2), but N is
1159 // small, and it's not done very often.
1161 // Once we're done, we return the new smallest interval between snapshots.
1162 // That becomes our minimum time interval.
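//
// A worked example of the selection rule: with snapshots at t = 0, 10, 20,
// 25 and 30, the three middle snapshots span d(0,20)=20, d(10,25)=15 and
// d(20,30)=10 respectively, so the t=25 snapshot is culled first; after
// that, the t=20 snapshot's span grows to d(10,30)=20.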
static UInt cull_snapshots(void)
{
   Int  i, jp, j, jn, min_timespan_i;
   Int  n_deleted = 0;
   Time min_timespan;
   Int  min_j;
   // Sets j to the index of the first not-yet-removed snapshot at or after i
   #define FIND_SNAPSHOT(i, j) \
      for (j = i; \
           j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
           j++) { }
1177 VERB(2, "Culling...\n");
1179 // First we remove enough snapshots by clearing them in-place. Once
1180 // that's done, we can slide the remaining ones down.
1181 for (i = 0; i < clo_max_snapshots/2; i++) {
      // Find the snapshot representing the smallest timespan.  The timespan
      // for snapshot n = d(N-1,N)+d(N,N+1), where d(A,B) is the time between
      // snapshot A and B.  We don't consider the first and last snapshots
      // for culling.
      Snapshot* min_snapshot;
      // Initial triple: (prev, curr, next) == (jp, j, jn)
      // Initial min_timespan is the first one.
      jp = 0;
      FIND_SNAPSHOT(1,   j);
      FIND_SNAPSHOT(j+1, jn);
      min_timespan = 0x7fffffffffffffffLL;
      min_j        = -1;
1196 while (jn < clo_max_snapshots) {
1197 Time timespan = snapshots[jn].time - snapshots[jp].time;
1198 tl_assert(timespan >= 0);
1199 // Nb: We never cull the peak snapshot.
         if (Peak != snapshots[j].kind && timespan < min_timespan) {
            min_timespan = timespan;
            min_j        = j;
         }
         // Move on to next triple.
         jp = j;
         j  = jn;
         FIND_SNAPSHOT(jn+1, jn);
      }
1209 // We've found the least important snapshot, now delete it. First
1210 // print it if necessary.
1211 tl_assert(-1 != min_j); // Check we found a minimum.
1212 min_snapshot = & snapshots[ min_j ];
      if (VG_(clo_verbosity) > 1) {
         Char buf[64];
         VG_(snprintf)(buf, 64, " %3d (t-span = %lld)", i, min_timespan);
         VERB_snapshot(2, buf, min_j);
      }
      delete_snapshot(min_snapshot);
      n_deleted++;
   }
1222 // Slide down the remaining snapshots over the removed ones. First set i
1223 // to point to the first empty slot, and j to the first full slot after
1224 // i. Then slide everything down.
1225 for (i = 0; is_snapshot_in_use( &snapshots[i] ); i++) { }
1226 for (j = i; !is_snapshot_in_use( &snapshots[j] ); j++) { }
1227 for ( ; j < clo_max_snapshots; j++) {
1228 if (is_snapshot_in_use( &snapshots[j] )) {
1229 snapshots[i++] = snapshots[j];
         clear_snapshot(&snapshots[j], /*do_sanity_check*/True);
      }
   }
   next_snapshot_i = i;
1235 // Check snapshots array looks ok after changes.
1236 sanity_check_snapshots_array();
1238 // Find the minimum timespan remaining; that will be our new minimum
1239 // time interval. Note that above we were finding timespans by measuring
1240 // two intervals around a snapshot that was under consideration for
1241 // deletion. Here we only measure single intervals because all the
1242 // deletions have occurred.
1244 // But we have to be careful -- some snapshots (eg. snapshot 0, and the
1245 // peak snapshot) are uncullable. If two uncullable snapshots end up
1246 // next to each other, they'll never be culled (assuming the peak doesn't
1247 // change), and the time gap between them will not change. However, the
1248 // time between the remaining cullable snapshots will grow ever larger.
1249 // This means that the min_timespan found will always be that between the
1250 // two uncullable snapshots, and it will be much smaller than it should
1251 // be. To avoid this problem, when computing the minimum timespan, we
1252 // ignore any timespans between two uncullable snapshots.
1253 tl_assert(next_snapshot_i > 1);
1254 min_timespan = 0x7fffffffffffffffLL;
1255 min_timespan_i = -1;
1256 for (i = 1; i < next_snapshot_i; i++) {
      if (is_uncullable_snapshot(&snapshots[i]) &&
          is_uncullable_snapshot(&snapshots[i-1]))
      {
         VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i-1, i);
      } else {
         Time timespan = snapshots[i].time - snapshots[i-1].time;
         tl_assert(timespan >= 0);
         if (timespan < min_timespan) {
            min_timespan   = timespan;
            min_timespan_i = i;
         }
      }
   }
1270 tl_assert(-1 != min_timespan_i); // Check we found a minimum.
1272 // Print remaining snapshots, if necessary.
1273 if (VG_(clo_verbosity) > 1) {
1274 VERB(2, "Finished culling (%3d of %3d deleted)\n",
1275 n_deleted, clo_max_snapshots);
      for (i = 0; i < next_snapshot_i; i++) {
         VERB_snapshot(2, "  post-cull", i);
      }
      VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
         min_timespan, min_timespan_i-1, min_timespan_i);
   }

   return min_timespan;
}
static Time get_time(void)
{
   // Get current time, in whatever time unit we're using.
1289 if (clo_time_unit == TimeI) {
1290 return guest_instrs_executed;
1291 } else if (clo_time_unit == TimeMS) {
1292 // Some stuff happens between the millisecond timer being initialised
1293 // to zero and us taking our first snapshot. We determine that time
1294 // gap so we can subtract it from all subsequent times so that our
1295 // first snapshot is considered to be at t = 0ms. Unfortunately, a
1296 // bunch of symbols get read after the first snapshot is taken but
1297 // before the second one (which is triggered by the first allocation),
1298 // so when the time-unit is 'ms' we always have a big gap between the
1299 // first two snapshots. But at least users won't have to wonder why
1300 // the first snapshot isn't at t=0.
1301 static Bool is_first_get_time = True;
1302 static Time start_time_ms;
1303 if (is_first_get_time) {
1304 start_time_ms = VG_(read_millisecond_timer)();
         is_first_get_time = False;
         return 0;
      } else {
         return VG_(read_millisecond_timer)() - start_time_ms;
      }
   } else if (clo_time_unit == TimeB) {
      return total_allocs_deallocs_szB;
   } else {
      tl_assert2(0, "bad --time-unit value");
   }
}
1317 // Take a snapshot, and only that -- decisions on whether to take a
1318 // snapshot, or what kind of snapshot, are made elsewhere.
1319 // Nb: we call the arg "my_time" because "time" shadows a global declaration
1320 // in /usr/include/time.h on Darwin.
static void
take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
              Bool is_detailed)
{
   tl_assert(!is_snapshot_in_use(snapshot));
   tl_assert(have_started_executing_code);

   // Heap and heap admin.
   if (clo_heap) {
      snapshot->heap_szB = heap_szB;
      if (is_detailed) {
         SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
         snapshot->alloc_sxpt = dup_XTree(alloc_xpt, total_szB);
         tl_assert(           alloc_xpt->szB == heap_szB);
         tl_assert(snapshot->alloc_sxpt->szB == heap_szB);
      }
      snapshot->heap_extra_szB = heap_extra_szB;
   }

   // Stack(s).
   if (clo_stacks) {
      snapshot->stacks_szB = stacks_szB;
   }

   // Rest of snapshot.
   snapshot->kind = kind;
   snapshot->time = my_time;
   sanity_check_snapshot(snapshot);

   // Update stats.
   n_real_snapshots++;
   if (Peak == kind) n_peak_snapshots++;
   if (is_detailed)  n_detailed_snapshots++;
}
// Take a snapshot, if it's time, or if we've hit a peak.
static void
maybe_take_snapshot(SnapshotKind kind, Char* what)
{
1361 // 'min_time_interval' is the minimum time interval between snapshots.
1362 // If we try to take a snapshot and less than this much time has passed,
1363 // we don't take it. It gets larger as the program runs longer. It's
   // initialised to zero so that we begin by taking snapshots as quickly as
   // possible.
1366 static Time min_time_interval = 0;
1367 // Zero allows startup snapshot.
1368 static Time earliest_possible_time_of_next_snapshot = 0;
1369 static Int n_snapshots_since_last_detailed = 0;
1370 static Int n_skipped_snapshots_since_last_snapshot = 0;
1374 // Nb: we call this variable "my_time" because "time" shadows a global
1375 // declaration in /usr/include/time.h on Darwin.
   Time      my_time = get_time();
   Snapshot* snapshot;
   Bool      is_detailed;

   switch (kind) {
   case Normal:
      // Only do a snapshot if it's time.
      if (my_time < earliest_possible_time_of_next_snapshot) {
         n_skipped_snapshots++;
         n_skipped_snapshots_since_last_snapshot++;
         return;
      }
      is_detailed = (clo_detailed_freq-1 == n_snapshots_since_last_detailed);
      break;

   case Peak: {
1390 // Because we're about to do a deallocation, we're coming down from a
1391 // local peak. If it is (a) actually a global peak, and (b) a certain
1392 // amount bigger than the previous peak, then we take a peak snapshot.
1393 // By not taking a snapshot for every peak, we save a lot of effort --
1394 // because many peaks remain peak only for a short time.
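      // For example: with the default --peak-inaccuracy=1.0, if the
      // recorded peak is 1,000,000B, a new local peak is only snapshotted
      // once the total exceeds 1,010,000B (1% above the recorded peak).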
1395 SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
1396 SizeT excess_szB_for_new_peak =
1397 (SizeT)((peak_snapshot_total_szB * clo_peak_inaccuracy) / 100);
      if (total_szB <= peak_snapshot_total_szB + excess_szB_for_new_peak) {
         return;
      }
      is_detailed = True;
      break;
   }

   default:
      tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
   }
1409 // Take the snapshot.
1410 snapshot = & snapshots[next_snapshot_i];
1411 take_snapshot(snapshot, kind, my_time, is_detailed);
   // Record if it was detailed.
   if (is_detailed) {
      n_snapshots_since_last_detailed = 0;
   } else {
      n_snapshots_since_last_detailed++;
   }

   // Update peak data, if it's a Peak snapshot.
   if (Peak == kind) {
      Int i, number_of_peaks_snapshots_found = 0;
1424 // Sanity check the size, then update our recorded peak.
1425 SizeT snapshot_total_szB =
1426 snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
1427 tl_assert2(snapshot_total_szB > peak_snapshot_total_szB,
1428 "%ld, %ld\n", snapshot_total_szB, peak_snapshot_total_szB);
1429 peak_snapshot_total_szB = snapshot_total_szB;
1431 // Find the old peak snapshot, if it exists, and mark it as normal.
1432 for (i = 0; i < next_snapshot_i; i++) {
1433 if (Peak == snapshots[i].kind) {
1434 snapshots[i].kind = Normal;
            number_of_peaks_snapshots_found++;
         }
      }
      tl_assert(number_of_peaks_snapshots_found <= 1);
   }
1441 // Finish up verbosity and stats stuff.
1442 if (n_skipped_snapshots_since_last_snapshot > 0) {
1443 VERB(2, " (skipped %d snapshot%s)\n",
1444 n_skipped_snapshots_since_last_snapshot,
         ( 1 == n_skipped_snapshots_since_last_snapshot ? "" : "s") );
   }
   VERB_snapshot(2, what, next_snapshot_i);
1448 n_skipped_snapshots_since_last_snapshot = 0;
   // Cull the entries, if our snapshot table is full.
   next_snapshot_i++;
   if (clo_max_snapshots == next_snapshot_i) {
      min_time_interval = cull_snapshots();
   }
   // Work out the earliest time when the next snapshot can happen.
   earliest_possible_time_of_next_snapshot = my_time + min_time_interval;
}
1461 //------------------------------------------------------------//
1462 //--- Sanity checking ---//
1463 //------------------------------------------------------------//
static Bool ms_cheap_sanity_check ( void )
{
   return True;   // Nothing useful we can cheaply check.
}
static Bool ms_expensive_sanity_check ( void )
{
   sanity_check_XTree(alloc_xpt, /*parent*/NULL);
   sanity_check_snapshots_array();
   return True;
}
1478 //------------------------------------------------------------//
1479 //--- Heap management ---//
1480 //------------------------------------------------------------//
1482 // Metadata for heap blocks. Each one contains a pointer to a bottom-XPt,
1483 // which is a foothold into the XCon at which it was allocated. From
1484 // HP_Chunks, XPt 'space' fields are incremented (at allocation) and
1485 // decremented (at deallocation).
1487 // Nb: first two fields must match core's VgHashNode.
typedef
   struct _HP_Chunk {
      struct _HP_Chunk* next;
      Addr              data;       // Ptr to actual block
      SizeT             req_szB;    // Size requested
      SizeT             slop_szB;   // Extra bytes given above those requested
      XPt*              where;      // Where allocated; bottom-XPt
   }
   HP_Chunk;
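
// For reference, the core's node layout being matched here (from
// pub_tool_hashtable.h) is:
//
//    typedef struct _VgHashNode {
//       struct _VgHashNode * next;
//       UWord              key;
//    } VgHashNode;
//
// HP_Chunk's 'next' and 'data' line up with 'next' and 'key', which is what
// lets HP_Chunks be stored directly in a VgHashTable.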
1498 static VgHashTable malloc_list = NULL; // HP_Chunks
static void update_alloc_stats(SSizeT szB_delta)
{
   // Update total_allocs_deallocs_szB.
   if (szB_delta < 0) szB_delta = -szB_delta;
   total_allocs_deallocs_szB += szB_delta;
}
static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
{
   if (heap_szB_delta < 0)
      tl_assert(heap_szB >= -heap_szB_delta);
   if (heap_extra_szB_delta < 0)
      tl_assert(heap_extra_szB >= -heap_extra_szB_delta);

   heap_extra_szB += heap_extra_szB_delta;
   heap_szB       += heap_szB_delta;

   update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
}
static
void* new_block ( ThreadId tid, void* p, SizeT req_szB, SizeT req_alignB,
                  Bool is_zeroed )
{
   HP_Chunk* hc;
   Bool  is_custom_alloc = (NULL != p);
   SizeT actual_szB, slop_szB;

   if ((SSizeT)req_szB < 0) return NULL;

   // Allocate and zero if necessary
   if (!p) {
      p = VG_(cli_malloc)( req_alignB, req_szB );
      if (!p) return NULL;
      if (is_zeroed) VG_(memset)(p, 0, req_szB);
      actual_szB = VG_(malloc_usable_size)(p);
      tl_assert(actual_szB >= req_szB);
      slop_szB = actual_szB - req_szB;
   } else {
      slop_szB = 0;
   }

   // Make new HP_Chunk node, add to malloc_list
   hc           = VG_(malloc)("ms.main.nb.1", sizeof(HP_Chunk));
   hc->req_szB  = req_szB;
   hc->slop_szB = slop_szB;
   hc->data     = (Addr)p;
   hc->where    = NULL;
   VG_(HT_add_node)(malloc_list, hc);

   if (clo_heap) {
      VERB(3, "<<< new_mem_heap (%lu, %lu)\n", req_szB, slop_szB);

      hc->where = get_XCon( tid, is_custom_alloc );

      if (hc->where) {
         // Update statistics.
         n_heap_allocs++;

         // Update heap stats.
         update_heap_stats(req_szB, clo_heap_admin + slop_szB);

         // Update XTree.
         update_XCon(hc->where, req_szB);

         // Maybe take a snapshot.
         maybe_take_snapshot(Normal, "  alloc");

      } else {
         // Ignored allocation.
         n_ignored_heap_allocs++;

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>>\n");
   }

   return p;
}
static __inline__
void die_block ( void* p, Bool custom_free )
{
   // Remove HP_Chunk from malloc_list
   HP_Chunk* hc = VG_(HT_remove)(malloc_list, (UWord)p);
   if (NULL == hc) {
      return;   // must have been a bogus free()
   }

   if (clo_heap) {
      VERB(3, "<<< die_mem_heap\n");

      if (hc->where) {
         // Update statistics.
         n_heap_frees++;

         // Maybe take a peak snapshot, since it's a deallocation.
         maybe_take_snapshot(Peak, "de-PEAK");

         // Update heap stats.
         update_heap_stats(-hc->req_szB, -clo_heap_admin - hc->slop_szB);

         // Update XTree.
         update_XCon(hc->where, -hc->req_szB);

         // Maybe take a snapshot.
         maybe_take_snapshot(Normal, "dealloc");

      } else {
         n_ignored_heap_frees++;

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>> (-%lu, -%lu)\n", hc->req_szB, hc->slop_szB);
   }

   // Actually free the chunk, and the heap block (if necessary)
   VG_(free)( hc );  hc = NULL;
   if (!custom_free)
      VG_(cli_free)( p );
}
1626 // Nb: --ignore-fn is tricky for realloc. If the block's original alloc was
1627 // ignored, but the realloc is not requested to be ignored, and we are
1628 // shrinking the block, then we have to ignore the realloc -- otherwise we
1629 // could end up with negative heap sizes. This isn't a danger if we are
1630 // growing such a block, but for consistency (it also simplifies things) we
1631 // ignore such reallocs as well.
static
void* renew_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
{
   HP_Chunk* hc;
   void*     p_new;
   SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
   XPt      *old_where, *new_where;
   Bool      is_ignored = False;

   // Remove the old block
   hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
   if (hc == NULL) {
      return NULL;   // must have been a bogus realloc()
   }

   old_req_szB  = hc->req_szB;
   old_slop_szB = hc->slop_szB;

   if (clo_heap) {
      VERB(3, "<<< renew_mem_heap (%lu)\n", new_req_szB);

      if (hc->where) {
         // Update statistics.
         n_heap_reallocs++;

         // Maybe take a peak snapshot, if it's (effectively) a deallocation.
         if (new_req_szB < old_req_szB) {
            maybe_take_snapshot(Peak, "re-PEAK");
         }
      } else {
         // The original malloc was ignored, so we have to ignore the
         // realloc as well.
         is_ignored = True;
      }
   }

   // Actually do the allocation, if necessary.
   if (new_req_szB <= old_req_szB + old_slop_szB) {
      // New size is smaller or same;  block not moved.
      p_new = p_old;
      new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);

   } else {
      // New size is bigger;  make new block, copy shared contents, free old.
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
      if (!p_new) {
         // Nb: if realloc fails, NULL is returned but the old block is not
         // touched.  What an awful function.
         return NULL;
      }
      VG_(memcpy)(p_new, p_old, old_req_szB);
      VG_(cli_free)(p_old);
      new_actual_szB = VG_(malloc_usable_size)(p_new);
      tl_assert(new_actual_szB >= new_req_szB);
      new_slop_szB = new_actual_szB - new_req_szB;
   }

   // Update the HP_Chunk.
   hc->data     = (Addr)p_new;
   hc->req_szB  = new_req_szB;
   hc->slop_szB = new_slop_szB;
   old_where    = hc->where;
   hc->where    = NULL;

   // Update XTree.
   if (clo_heap) {
      new_where = get_XCon( tid, /*custom_malloc*/False);
      if (!is_ignored && new_where) {
         hc->where = new_where;
         update_XCon(old_where, -old_req_szB);
         update_XCon(new_where,  new_req_szB);
      } else {
         // The realloc itself is ignored.
         is_ignored = True;

         // Update statistics.
         n_ignored_heap_reallocs++;
      }
   }

   // Now insert the new hc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added hc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)(malloc_list, hc);

   if (clo_heap) {
      if (!is_ignored) {
         // Update heap stats.
         update_heap_stats(new_req_szB - old_req_szB,
                           new_slop_szB - old_slop_szB);

         // Maybe take a snapshot.
         maybe_take_snapshot(Normal, "realloc");
      } else {
         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>> (%ld, %ld)\n",
         new_req_szB - old_req_szB, new_slop_szB - old_slop_szB);
   }

   return p_new;
}
1742 //------------------------------------------------------------//
1743 //--- malloc() et al replacement wrappers ---//
1744 //------------------------------------------------------------//
static void* ms_malloc ( ThreadId tid, SizeT szB )
{
   return new_block( tid, NULL, szB, VG_(clo_alignment), /*is_zeroed*/False );
}
static void* ms___builtin_new ( ThreadId tid, SizeT szB )
{
   return new_block( tid, NULL, szB, VG_(clo_alignment), /*is_zeroed*/False );
}
static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
{
   return new_block( tid, NULL, szB, VG_(clo_alignment), /*is_zeroed*/False );
}
static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
{
   return new_block( tid, NULL, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
}
static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
{
   return new_block( tid, NULL, szB, alignB, False );
}
static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
{
   die_block( p, /*custom_free*/False );
}
static void ms___builtin_delete ( ThreadId tid, void* p )
{
   die_block( p, /*custom_free*/False );
}
static void ms___builtin_vec_delete ( ThreadId tid, void* p )
{
   die_block( p, /*custom_free*/False );
}
static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
{
   return renew_block(tid, p_old, new_szB);
}
static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
{
   HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );

   return ( hc ? hc->req_szB + hc->slop_szB : 0 );
}
//------------------------------------------------------------//
//--- Stacks                                               ---//
//------------------------------------------------------------//
1802 // We really want the inlining to occur...
1803 #define INLINE inline __attribute__((always_inline))
static void update_stack_stats(SSizeT stack_szB_delta)
{
   if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
   stacks_szB += stack_szB_delta;

   update_alloc_stats(stack_szB_delta);
}
static INLINE void new_mem_stack_2(SizeT len, Char* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< new_mem_stack (%ld)\n", len);
      update_stack_stats(len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}
static INLINE void die_mem_stack_2(SizeT len, Char* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< die_mem_stack (%ld)\n", -len);
      maybe_take_snapshot(Peak, "stkPEAK");
      update_stack_stats(-len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}
static void new_mem_stack(Addr a, SizeT len)
{
   new_mem_stack_2(len, "stk-new");
}
1841 static void die_mem_stack(Addr a, SizeT len)
1843 die_mem_stack_2(len, "stk-die");
1846 static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
1848 new_mem_stack_2(len, "sig-new");
1851 static void die_mem_stack_signal(Addr a, SizeT len)
1853 die_mem_stack_2(len, "sig-die");
//------------------------------------------------------------//
//--- Client Requests                                      ---//
//------------------------------------------------------------//
static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
{
   switch (argv[0]) {
   case VG_USERREQ__MALLOCLIKE_BLOCK: {
      void* p   = (void*)argv[1];
      SizeT szB = argv[2];
      void* res = new_block( tid, p, szB, /*alignB--ignored*/0,
                             /*is_zeroed*/False );
      tl_assert(res == p);
      return True;
   }
   case VG_USERREQ__FREELIKE_BLOCK: {
      void* p = (void*)argv[1];
      die_block( p, /*custom_free*/True );
      return True;
   }
   default:
      return False;
   }
}
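// For illustration: this is how client code drives the two requests above,
// via the standard VALGRIND_MALLOCLIKE_BLOCK/VALGRIND_FREELIKE_BLOCK macros
// from valgrind.h.  A minimal sketch assuming a hypothetical bump allocator;
// commented out because client code cannot live inside a tool.
//
//    #include "valgrind.h"
//    static char   pool[4096];
//    static size_t pool_used = 0;
//
//    static void* pool_alloc(size_t szB)
//    {
//       void* p = &pool[pool_used];
//       pool_used += szB;
//       // Reaches ms_handle_client_request as VG_USERREQ__MALLOCLIKE_BLOCK,
//       // so Massif records the block as if it had been malloc'd.
//       VALGRIND_MALLOCLIKE_BLOCK(p, szB, /*rzB*/0, /*is_zeroed*/0);
//       return p;
//    }
//
//    static void pool_free(void* p)
//    {
//       // Reaches ms_handle_client_request as VG_USERREQ__FREELIKE_BLOCK.
//       VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);
//    }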
//------------------------------------------------------------//
//--- Instrumentation                                      ---//
//------------------------------------------------------------//
static void add_counter_update(IRSB* sbOut, Int n)
{
#if defined(VG_BIGENDIAN)
# define END Iend_BE
#elif defined(VG_LITTLEENDIAN)
# define END Iend_LE
#else
# error "Unknown endianness"
#endif
   // Add code to increment 'guest_instrs_executed' by 'n', like this:
   //   WrTmp(t1, Load64(&guest_instrs_executed))
   //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
   //   Store(&guest_instrs_executed, t2)
   IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );

   IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
   IRStmt* st2 = IRStmt_WrTmp(t2,
                    IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
                                 IRExpr_Const(IRConst_U64(n))));
   IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));

   addStmtToIRSB( sbOut, st1 );
   addStmtToIRSB( sbOut, st2 );
   addStmtToIRSB( sbOut, st3 );
}
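// At the C level the three statements built above amount to nothing more
// than "guest_instrs_executed += n;" -- the load/add/store are spelled out
// because the update must be expressed as Vex IR to run in the guest, not
// executed here.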
static IRSB* ms_instrument2( IRSB* sbIn )
{
   Int   i, n = 0;
   IRSB* sbOut;

   // We increment the instruction count in two places:
   // - just before any Ist_Exit statements;
   // - just before the IRSB's end.
   // In the former case, we zero 'n' and then continue instrumenting.

   sbOut = deepCopyIRSBExceptStmts(sbIn);

   for (i = 0; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      if (st->tag == Ist_IMark) {
         n++;
      } else if (st->tag == Ist_Exit && n > 0) {
         // Add an increment before the Exit statement, then reset 'n'.
         add_counter_update(sbOut, n);
         n = 0;
      }
      addStmtToIRSB( sbOut, st );
   }

   if (n > 0) {
      // Add an increment before the SB end.
      add_counter_update(sbOut, n);
   }
   return sbOut;
}
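// Worked example: for a (hypothetical) superblock of the shape
//    IMark; IMark; Exit; IMark; <SB end>
// the loop above emits "counter += 2" just before the Exit and
// "counter += 1" just before the SB end, so every guest instruction is
// counted exactly once whichever way the block is left.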
static IRSB* ms_instrument ( VgCallbackClosure* closure,
                             IRSB* sbIn,
                             VexGuestLayout* layout,
                             VexGuestExtents* vge,
                             IRType gWordTy, IRType hWordTy )
{
   if (! have_started_executing_code) {
      // Do an initial sample to guarantee that we have at least one.
      // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
      // 'maybe_take_snapshot's internal static variables are initialised.
      have_started_executing_code = True;
      maybe_take_snapshot(Normal, "startup");
   }

   if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
   else if (clo_time_unit == TimeMS) { return sbIn; }
   else if (clo_time_unit == TimeB)  { return sbIn; }
   else    { tl_assert2(0, "bad --time-unit value"); }
}
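// So only --time-unit=i (instructions) needs binary instrumentation; with
// --time-unit=ms (wall-clock time) or --time-unit=B (bytes allocated and
// deallocated) the superblock is passed through untouched.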
//------------------------------------------------------------//
//--- Writing snapshots                                    ---//
//------------------------------------------------------------//

Char FP_buf[BUF_LEN];
// XXX: implement f{,n}printf in m_libcprint.c eventually, and use it here.
// Then change Cachegrind to use it too.
#define FP(format, args...) ({ \
   VG_(snprintf)(FP_buf, BUF_LEN, format, ##args); \
   FP_buf[BUF_LEN-1] = '\0';  /* Make sure the string is terminated. */ \
   VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf)); \
})
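// Nb: FP is printf-like, eg. FP("time=%lld\n", snapshot->time).  It is a
// GCC statement expression, and it quietly relies on an 'fd' variable
// being in scope at each call site.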
// Nb: uses a static buffer, each call trashes the last string returned.
static Char* make_perc(ULong x, ULong y)
{
   static Char mbuf[32];

   // tl_assert(x <= y);  XXX: put back in later...

   // XXX: I'm not confident that VG_(percentify) works as it should...
   VG_(percentify)(x, y, 2, 6, mbuf);
   // XXX: this is bogus if the denominator was zero -- resulting string is
   // something like "0 --%".
   if (' ' == mbuf[0]) mbuf[0] = '0';
   return mbuf;
}
static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
                             Int depth_str_len,
                             SizeT snapshot_heap_szB, SizeT snapshot_total_szB)
{
   Int   i, j, n_insig_children_sxpts;
   SXPt* child = NULL;

   // Used for printing function names.  Is made static to keep it out
   // of the stack frame -- this function is recursive.  Obviously this
   // now means its contents are trashed across the recursive call.
   static Char ip_desc_array[BUF_LEN];
   Char* ip_desc = ip_desc_array;

   switch (sxpt->tag) {
    case SigSXPt:
      // Print the SXPt itself.
      if (0 == depth) {
         ip_desc =
            "(heap allocation functions) malloc/new/new[], --alloc-fns, etc.";
      } else {
         // If it's main-or-below-main, we (if appropriate) ignore everything
         // below it by pretending it has no children.
         if ( ! VG_(clo_show_below_main) ) {
            Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(sxpt->Sig.ip);
            if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
               sxpt->Sig.n_children = 0;
            }
         }

         // We need the -1 to get the line number right, but I'm not sure why.
         ip_desc = VG_(describe_IP)(sxpt->Sig.ip-1, ip_desc, BUF_LEN);
      }

      // Do the non-ip_desc part first...
      FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);

      // For ip_descs beginning with "0xABCD...:" addresses, we first
      // measure the length of the "0xabcd: " address at the start of the
      // ip_desc.
      j = 0;
      if ('0' == ip_desc[0] && 'x' == ip_desc[1]) {
         j = 2;
         while (True) {
            if (ip_desc[j]) {
               if (':' == ip_desc[j]) break;
               j++;
            } else {
               tl_assert2(0, "ip_desc has unexpected form: %s\n", ip_desc);
            }
         }
      }

      // Nb: We treat this specially (ie. we don't use FP) so that if the
      // ip_desc is too long (eg. due to a long C++ function name), it'll
      // get truncated, but the '\n' is still there so it's a valid file.
      // (At one point we were truncating without adding the '\n', which
      // caused bug #155929.)
      //
      // Also, we account for the length of the address in ip_desc when
      // truncating.  (The longest address we could have is 18 chars: "0x"
      // plus 16 address digits.)  This ensures that the truncated function
      // name always has the same length, which makes truncation
      // deterministic and thus makes testing easier.
      VG_(snprintf)(FP_buf, BUF_LEN, "%s\n", ip_desc);
      FP_buf[BUF_LEN-18+j-5] = '.';    // "..." at the end make the
      FP_buf[BUF_LEN-18+j-4] = '.';    //   truncation more obvious.
      FP_buf[BUF_LEN-18+j-3] = '.';
      FP_buf[BUF_LEN-18+j-2] = '\n';   // The last char is '\n'.
      FP_buf[BUF_LEN-18+j-1] = '\0';   // The string is terminated.
      VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf));

      // Indent.
      tl_assert(depth+1 < depth_str_len-1);    // -1 for end NUL char
      depth_str[depth+0] = ' ';
      depth_str[depth+1] = '\0';

      // Sort SXPt's children by szB (reverse order: biggest to smallest).
      // Nb: we sort them here, rather than earlier (eg. in dup_XTree), for
      // two reasons.  First, if we do it during dup_XTree, it can get
      // expensive (eg. 15% of execution time for konqueror
      // startup/shutdown).  Second, this way we get the Insig SXPt (if one
      // is present) in its sorted position, not at the end.
      VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children, sizeof(SXPt*),
                 SXPt_revcmp_szB);

      // Print the SXPt's children.  They should already be in sorted order.
      n_insig_children_sxpts = 0;
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         child = sxpt->Sig.children[i];

         if (InsigSXPt == child->tag)
            n_insig_children_sxpts++;

         // Ok, print the child.  NB: contents of ip_desc_array will be
         // trashed by this recursive call.  Doesn't matter currently,
         // but worth noting.
         pp_snapshot_SXPt(fd, child, depth+1, depth_str, depth_str_len,
                          snapshot_heap_szB, snapshot_total_szB);
      }

      // Unindent.
      depth_str[depth+0] = '\0';
      depth_str[depth+1] = '\0';

      // There should be 0 or 1 Insig children SXPts.
      tl_assert(n_insig_children_sxpts <= 1);
      break;

    case InsigSXPt: {
      Char* s = ( 1 == sxpt->Insig.n_xpts ? "," : "s, all" );
      FP("%sn0: %lu in %d place%s below massif's threshold (%s)\n",
         depth_str, sxpt->szB, sxpt->Insig.n_xpts, s,
         make_perc((ULong)clo_threshold, 100));
      break;
    }

    default:
      tl_assert2(0, "pp_snapshot_SXPt: unrecognised SXPt tag");
   }
}
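// Nb: each SigSXPt line printed above therefore has the shape
//    "<indent>n<n_children>: <bytes> <code location>"
// which is the tree format that ms_print consumes.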
static void pp_snapshot(Int fd, Snapshot* snapshot, Int snapshot_n)
{
   sanity_check_snapshot(snapshot);

   FP("#-----------\n");
   FP("snapshot=%d\n", snapshot_n);
   FP("#-----------\n");
   FP("time=%lld\n",            snapshot->time);
   FP("mem_heap_B=%lu\n",       snapshot->heap_szB);
   FP("mem_heap_extra_B=%lu\n", snapshot->heap_extra_szB);
   FP("mem_stacks_B=%lu\n",     snapshot->stacks_szB);

   if (is_detailed_snapshot(snapshot)) {
      // Detailed snapshot -- print heap tree.
      Int   depth_str_len = clo_depth + 3;
      Char* depth_str = VG_(malloc)("ms.main.pps.1",
                                    sizeof(Char) * depth_str_len);
      SizeT snapshot_total_szB =
         snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
      depth_str[0] = '\0';   // Initialise depth_str to "".

      FP("heap_tree=%s\n", ( Peak == snapshot->kind ? "peak" : "detailed" ));
      pp_snapshot_SXPt(fd, snapshot->alloc_sxpt, 0, depth_str,
                       depth_str_len, snapshot->heap_szB,
                       snapshot_total_szB);

      VG_(free)(depth_str);
   } else {
      FP("heap_tree=empty\n");
   }
}
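// For illustration, a detailed snapshot record as written by pp_snapshot
// looks like this (all values hypothetical):
//
//    #-----------
//    snapshot=3
//    #-----------
//    time=20484
//    mem_heap_B=1000
//    mem_heap_extra_B=8
//    mem_stacks_B=0
//    heap_tree=detailed
//    n1: 1000 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
//     n0: 1000 0x804841F: main (prog.c:5)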
static void write_snapshots_to_file(void)
{
   Int i, fd;
   SysRes sres;

   // Setup output filename.  Nb: it's important to do this now, ie. as late
   // as possible.  If we do it at start-up and the program forks and the
   // output file format string contains a %p (pid) specifier, both the
   // parent and child will incorrectly write to the same file; this
   // happened in 3.3.0.
   Char* massif_out_file =
      VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);

   sres = VG_(open)(massif_out_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
                                     VKI_S_IRUSR|VKI_S_IWUSR);
   if (sr_isError(sres)) {
      // If the file can't be opened for whatever reason (conflict
      // between multiple Massif'd processes?), give up now.
      VG_(umsg)("error: can't open output file '%s'\n", massif_out_file );
      VG_(umsg)("       ... so profiling results will be missing.\n");
      VG_(free)(massif_out_file);
      return;
   } else {
      fd = sr_Res(sres);
      VG_(free)(massif_out_file);
   }
   // Print massif-specific options that were used.
   // XXX: is it worth having a "desc:" line?  Could just call it "options:"
   // -- this file format isn't as generic as Cachegrind's, so the
   // implied genericity of "desc:" is bogus.
   FP("desc:");
   for (i = 0; i < VG_(sizeXA)(args_for_massif); i++) {
      Char* arg = *(Char**)VG_(indexXA)(args_for_massif, i);
      FP(" %s", arg);
   }
   if (0 == i) FP(" (none)");
   FP("\n");

   // Print "cmd:" line.
   FP("cmd: ");
   if (VG_(args_the_exename)) {
      FP("%s", VG_(args_the_exename));
      for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
         HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_client), i );
         FP(" %s", arg);
      }
   }
   FP("\n");

   FP("time_unit: %s\n", TimeUnit_to_string(clo_time_unit));

   for (i = 0; i < next_snapshot_i; i++) {
      Snapshot* snapshot = & snapshots[i];
      pp_snapshot(fd, snapshot, i);     // Detailed snapshot!
   }
}
//------------------------------------------------------------//
//--- Finalisation                                         ---//
//------------------------------------------------------------//
static void ms_fini(Int exit_status)
{
   // Output.
   write_snapshots_to_file();

   // Stats.
   tl_assert(n_xpts > 0);  // always have alloc_xpt
   STATS("heap allocs:           %u\n", n_heap_allocs);
   STATS("heap reallocs:         %u\n", n_heap_reallocs);
   STATS("heap frees:            %u\n", n_heap_frees);
   STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
   STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
   STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
   STATS("stack allocs:          %u\n", n_stack_allocs);
   STATS("stack frees:           %u\n", n_stack_frees);
   STATS("XPts:                  %u\n", n_xpts);
   STATS("top-XPts:              %u (%d%%)\n",
         alloc_xpt->n_children,
         ( n_xpts ? alloc_xpt->n_children * 100 / n_xpts : 0));
   STATS("XPt init expansions:   %u\n", n_xpt_init_expansions);
   STATS("XPt later expansions:  %u\n", n_xpt_later_expansions);
   STATS("SXPt allocs:           %u\n", n_sxpt_allocs);
   STATS("SXPt frees:            %u\n", n_sxpt_frees);
   STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
   STATS("real snapshots:        %u\n", n_real_snapshots);
   STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
   STATS("peak snapshots:        %u\n", n_peak_snapshots);
   STATS("cullings:              %u\n", n_cullings);
   STATS("XCon redos:            %u\n", n_XCon_redos);
}
//------------------------------------------------------------//
//--- Initialisation                                       ---//
//------------------------------------------------------------//
static void ms_post_clo_init(void)
{
   Int i;

   // Check options.
   if (clo_threshold < 0 || clo_threshold > 100) {
      VG_(umsg)("--threshold must be between 0.0 and 100.0\n");
      VG_(err_bad_option)("--threshold");
   }

   // If we have --heap=no, set --heap-admin to zero, just to make sure we
   // don't accidentally use a non-zero heap-admin size somewhere.
   if (!clo_heap) {
      clo_heap_admin = 0;
   }

   // Print alloc-fns and ignore-fns, if necessary.
   if (VG_(clo_verbosity) > 1) {
      VERB(1, "alloc-fns:\n");
      for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
         Char** fn_ptr = VG_(indexXA)(alloc_fns, i);
         VERB(1, "  %s\n", *fn_ptr);
      }

      VERB(1, "ignore-fns:\n");
      if (0 == VG_(sizeXA)(ignore_fns)) {
         VERB(1, "  <empty>\n");
      }
      for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
         Char** fn_ptr = VG_(indexXA)(ignore_fns, i);
         VERB(1, "  %d: %s\n", i, *fn_ptr);
      }
   }

   // Events to track.
   if (clo_stacks) {
      VG_(track_new_mem_stack)        ( new_mem_stack        );
      VG_(track_die_mem_stack)        ( die_mem_stack        );
      VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
      VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
   }

   // Initialise snapshot array, and sanity-check it.
   snapshots = VG_(malloc)("ms.main.mpoci.1",
                           sizeof(Snapshot) * clo_max_snapshots);
   // We don't want to do snapshot sanity checks here, because they're
   // currently uninitialised.
   for (i = 0; i < clo_max_snapshots; i++) {
      clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
   }
   sanity_check_snapshots_array();
}
static void ms_pre_clo_init(void)
{
   VG_(details_name)            ("Massif");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a heap profiler");
   VG_(details_copyright_author)(
      "Copyright (C) 2003-2010, and GNU GPL'd, by Nicholas Nethercote");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   // Basic functions.
   VG_(basic_tool_funcs)          (ms_post_clo_init,
                                   ms_instrument,
                                   ms_fini);

   // Needs.
   VG_(needs_libc_freeres)();
   VG_(needs_command_line_options)(ms_process_cmd_line_option,
                                   ms_print_usage,
                                   ms_print_debug_usage);
   VG_(needs_client_requests)     (ms_handle_client_request);
   VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
                                   ms_expensive_sanity_check);
   VG_(needs_malloc_replacement)  (ms_malloc,
                                   ms___builtin_new,
                                   ms___builtin_vec_new,
                                   ms_memalign,
                                   ms_calloc,
                                   ms_free,
                                   ms___builtin_delete,
                                   ms___builtin_vec_delete,
                                   ms_realloc,
                                   ms_malloc_usable_size,
                                   0 );

   // HP_Chunks.
   malloc_list = VG_(HT_construct)( "Massif's malloc list" );

   // Dummy node at top of the context structure.
   alloc_xpt = new_XPt(/*ip*/0, /*parent*/NULL);

   // Initialise alloc_fns and ignore_fns.
   init_alloc_fns();
   init_ignore_fns();

   // Initialise args_for_massif.
   args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
                                VG_(free), sizeof(HChar*));
}
VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)
//--------------------------------------------------------------------//
//--- end                                                          ---//
//--------------------------------------------------------------------//