/*--------------------------------------------------------------------*/
/*--- Massif: a heap profiling tool.                     ms_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Massif, a Valgrind tool for profiling memory
   usage of programs.

   Copyright (C) 2003-2010 Nicholas Nethercote

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

//---------------------------------------------------------------------------
// XXX:
//---------------------------------------------------------------------------
// Todo -- nice, but less critical:
// - do a graph-drawing test
// - make file format more generic.  Obstacles:
//   - unit prefixes are not generic
//   - preset column widths for stats are not generic
//   - preset column headers are not generic
//   - "Massif arguments:" line is not generic
// - do snapshots on client requests
//   - (Michael Meeks): have an interactive way to request a dump
//     (callgrind_control-style)
//   - "show me the extra allocations since the last snapshot"
//   - "start/stop logging" (eg. quickly skip boring bits)
// - Add ability to draw multiple graphs, eg. heap-only, stack-only, total.
//   Give each graph a title.  (try to do it generically!)
// - allow truncation of long fnnames if the exact line number is
//   identified?  [hmm, could make getting the name of alloc-fns more
//   difficult]  [could dump full names to file, truncate in ms_print]
// - make --show-below-main=no work
// - Options like --alloc-fn='operator new(unsigned, std::nothrow_t const&)'
//   don't work in a .valgrindrc file or in $VALGRIND_OPTS.
//   m_commandline.c:add_args_from_string() needs to respect single quotes.
// - With --stacks=yes, want to add a stack trace for detailed snapshots so
//   it's clear where/why the peak is occurring. (Mattieu Castet)  Also,
//   possibly useful even with --stacks=no? (Andi Yin)
// - To run the benchmarks:
//
//     perl perf/vg_perf --tools=massif --reps=3 perf/{heap,tinycc} massif
//     time valgrind --tool=massif --depth=100 konqueror
//
//   The other benchmarks don't do much allocation, and so give similar speeds
//   to Nulgrind.
//
// Timing results on 'nevermore' (njn's machine) as of r7013:
//
//   heap      0.53s   ma:12.4s (23.5x, -----)
//   tinycc    0.46s   ma: 4.9s (10.7x, -----)
//   many-xpts 0.08s   ma: 2.0s (25.0x, -----)
//   konqueror 29.6s real  0:21.0s user
//
// [Introduction of --time-unit=i as the default slowed things down by
//
// - get_XCon accounts for about 9% of konqueror startup time.  Try
//   keeping XPt children sorted by 'ip' and use binary search in get_XCon.
//   Requires factoring out binary search code from various places into a
//   VG_(bsearch) function.
//
// Todo -- low priority:
// - In each XPt, record both bytes and the number of allocations, and
//   possibly the global number of allocations.
// - (Andy Lin) Give a stack trace on detailed snapshots?
// - (Artur Wisz) add a feature to Massif to ignore any heap blocks larger
//   than a certain size!  Because: "linux's malloc allows to set a
//   MMAP_THRESHOLD value, so we set it to 4096 - all blocks above that will
//   be handled directly by the kernel, and are guaranteed to be returned to
//   the system when freed.  So we needed to profile only blocks below this
//   limit."

// File format working notes:
#if 0
desc: --heap-admin=foo
cmd: date
time_unit: ms

n1: 5 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
 n1: 5 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
  n1: 5 0x279DE6: _nl_load_locale_from_archive (in /lib/libc-2.3.5.so)
   n1: 5 0x278E97: _nl_find_locale (in /lib/libc-2.3.5.so)
    n1: 5 0x278871: setlocale (in /lib/libc-2.3.5.so)
     n1: 5 0x8049821: (within /bin/date)
      n0: 5 0x26ED5E: (below main) (in /lib/libc-2.3.5.so)

n_events: n time(ms) total(B) useful-heap(B) admin-heap(B) stacks(B)
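
(For reference, a reading inferred from the sample above: each tree line has
the form "nN: B <ip>: <fn>", where N is the node's number of children and B
is the number of bytes the node accounts for.)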

- each snapshot specifies an x-axis value and one or more y-axis values.
- can display the y-axis values separately if you like
- can completely separate connection between snapshots and trees.

- how to specify and scale/abbreviate units on axes?
- how to combine multiple values into the y-axis?

--------------------------------------------------------------------------------
Command:            date
Massif arguments:   --heap-admin=foo
ms_print arguments: massif.out
--------------------------------------------------------------------------------

    |   ::@  :@ :@ :@:::#  ::  :    ::::
  0 +-----------------------------------@---@---@-----@--@---#-------------->ms
    0                                                                      713

Number of snapshots: 50
 Detailed snapshots: [2, 11, 13, 19, 25, 32 (peak)]

--------------------------------------------------------------------------------
  n       time(ms)         total(B)   useful-heap(B) admin-heap(B)    stacks(B)
--------------------------------------------------------------------------------
  0              0                0                0             0            0

100.00% (5B) (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
->100.00% (5B) 0x27F6E0: _nl_normalize_codeset (in /lib/libc-2.3.5.so)
#endif

//---------------------------------------------------------------------------

#include "pub_tool_basics.h"
#include "pub_tool_vki.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcfile.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_libcproc.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_xarray.h"
#include "pub_tool_clientstate.h"
#include "pub_tool_gdbserver.h"

#include "valgrind.h"           // For {MALLOC,FREE}LIKE_BLOCK

//------------------------------------------------------------//
//--- Overview of operation                                ---//
//------------------------------------------------------------//

// The size of the stacks and heap is tracked.  The heap is tracked in a lot
// of detail, enough to tell how many bytes each line of code is responsible
// for, more or less.  The main data structure is a tree representing the
// call tree beneath all the allocation functions like malloc().
// (Alternatively, if --pages-as-heap=yes is specified, memory is tracked at
// the page level, and each page is treated much like a heap block.  We use
// "heap" throughout below to cover this case because the concepts are all the
// same.)
200 // "Snapshots" are recordings of the memory usage. There are two basic
202 // - Normal: these record the current time, total memory size, total heap
203 // size, heap admin size and stack size.
204 // - Detailed: these record those things in a normal snapshot, plus a very
205 // detailed XTree (see below) indicating how the heap is structured.
//
// Snapshots are taken every so often.  There are two storage classes of
// snapshots:
// - Temporary:  Massif does a temporary snapshot every so often.  The idea
//   is to always have a certain number of temporary snapshots around.  So
//   we take them frequently to begin with, but decreasingly often as the
//   program continues to run.  Also, we remove some old ones after a while.
//   Overall it's a kind of exponential decay thing.  Most of these are
//   normal snapshots, a small fraction are detailed snapshots.
// - Permanent:  Massif takes a permanent (detailed) snapshot in some
//   circumstances.  They are:
//   - Peak snapshot:  When the memory usage peak is reached, it takes a
//     snapshot.  It keeps this, unless the peak is subsequently exceeded,
//     in which case it will overwrite the peak snapshot.
//   - User-requested snapshots:  These are done in response to client
//     requests.  They are always kept.

// Used for printing things when clo_verbosity > 1.
#define VERB(verb, format, args...) \
   if (VG_(clo_verbosity) > verb) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }

// Used for printing stats when clo_stats == True.
#define STATS(format, args...) \
   if (VG_(clo_stats)) { \
      VG_(dmsg)("Massif: " format, ##args); \
   }
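
// A hedged usage sketch of these macros (illustrative, not from the original
// file): the shutdown stats code might print a counter like
//
//    STATS("XPts:          %u\n", n_xpts);
//
// which emits nothing unless the user ran with --stats=yes.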

//------------------------------------------------------------//
//--- Statistics                                           ---//
//------------------------------------------------------------//

// Konqueror startup, to give an idea of the numbers involved with a biggish
// program, with default depth:
//
//  - 310,000 allocations
//  - 15,000 XPts            800,000 XPts

static UInt n_heap_allocs           = 0;
static UInt n_heap_reallocs         = 0;
static UInt n_heap_frees            = 0;
static UInt n_ignored_heap_allocs   = 0;
static UInt n_ignored_heap_frees    = 0;
static UInt n_ignored_heap_reallocs = 0;
static UInt n_stack_allocs          = 0;
static UInt n_stack_frees           = 0;
static UInt n_xpts                  = 0;
static UInt n_xpt_init_expansions   = 0;
static UInt n_xpt_later_expansions  = 0;
static UInt n_sxpt_allocs           = 0;
static UInt n_sxpt_frees            = 0;
static UInt n_skipped_snapshots     = 0;
static UInt n_real_snapshots        = 0;
static UInt n_detailed_snapshots    = 0;
static UInt n_peak_snapshots        = 0;
static UInt n_cullings              = 0;
static UInt n_XCon_redos            = 0;

//------------------------------------------------------------//
//--- Globals                                              ---//
//------------------------------------------------------------//

// Number of guest instructions executed so far.  Only used with
// --time-unit=i.
static Long guest_instrs_executed = 0;

static SizeT heap_szB       = 0;  // Live heap size
static SizeT heap_extra_szB = 0;  // Live heap extra size -- slop + admin bytes
static SizeT stacks_szB     = 0;  // Live stacks size

// This is the total size from the current peak snapshot, or 0 if no peak
// snapshot has been taken yet.
static SizeT peak_snapshot_total_szB = 0;

// Incremented every time memory is allocated/deallocated, by the
// allocated/deallocated amount;  includes heap, heap-admin and stack
// memory.  An alternative to milliseconds as a unit of program "time".
static ULong total_allocs_deallocs_szB = 0;

// When running with --heap=yes --pages-as-heap=no, we don't start taking
// snapshots until the first basic block is executed, rather than doing it in
// ms_post_clo_init (which is the obvious spot), for two reasons.
// - It lets us ignore stack events prior to that, because they're not
//   really proper ones and just would screw things up.
// - Because there's still some core initialisation to do, and so there
//   would be an artificial time gap between the first and second snapshots.
//
// When running with --heap=yes --pages-as-heap=yes, snapshots start much
// earlier due to new_mem_startup so this isn't relevant.
//
static Bool have_started_executing_code = False;

//------------------------------------------------------------//
//--- Alloc fns                                            ---//
//------------------------------------------------------------//

static XArray* alloc_fns;
static XArray* ignore_fns;

static void init_alloc_fns(void)
{
   // Create the list, and add the default elements.
   alloc_fns = VG_(newXA)(VG_(malloc), "ms.main.iaf.1",
                          VG_(free), sizeof(Char*));
   #define DO(x)  { Char* s = x;  VG_(addToXA)(alloc_fns, &s); }

   // Ordered roughly according to (presumed) frequency.
   // Nb: The C++ "operator new*" ones are overloadable.  We include them
   // always anyway, because even if they're overloaded, it would be a
   // prodigiously stupid overloading that caused them to not allocate
   // memory.
   //
   // XXX: because we don't look at the first stack entry (unless it's a
   // custom allocation) there's not much point to having all these alloc
   // functions here -- they should never appear anywhere (I think?) other
   // than the top stack entry.  The only exceptions are those that in
   // vg_replace_malloc.c are partly or fully implemented in terms of another
   // alloc function: realloc (which uses malloc);  valloc,
   // malloc_zone_valloc, posix_memalign and memalign_common (which use
   // memalign).
   //
   DO("malloc"                                              );
   DO("__builtin_new"                                       );
   DO("operator new(unsigned)"                              );
   DO("operator new(unsigned long)"                         );
   DO("__builtin_vec_new"                                   );
   DO("operator new[](unsigned)"                            );
   DO("operator new[](unsigned long)"                       );
   DO("calloc"                                              );
   DO("realloc"                                             );
   DO("memalign"                                            );
   DO("posix_memalign"                                      );
   DO("valloc"                                              );
   DO("operator new(unsigned, std::nothrow_t const&)"       );
   DO("operator new[](unsigned, std::nothrow_t const&)"     );
   DO("operator new(unsigned long, std::nothrow_t const&)"  );
   DO("operator new[](unsigned long, std::nothrow_t const&)");
#if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   DO("malloc_common"                                       );
   DO("calloc_common"                                       );
   DO("realloc_common"                                      );
   DO("memalign_common"                                     );
#elif defined(VGO_darwin)
   DO("malloc_zone_malloc"                                  );
   DO("malloc_zone_calloc"                                  );
   DO("malloc_zone_realloc"                                 );
   DO("malloc_zone_memalign"                                );
   DO("malloc_zone_valloc"                                  );
#endif
}

static void init_ignore_fns(void)
{
   // Create the (empty) list.
   ignore_fns = VG_(newXA)(VG_(malloc), "ms.main.iif.1",
                           VG_(free), sizeof(Char*));
}

// Determines if the named function is a member of the XArray.
static Bool is_member_fn(XArray* fns, Char* fnname)
{
   Char** fn_ptr;
   Int    i;

   // Nb: It's a linear search through the list, because we're comparing
   // strings rather than pointers to strings.
   // Nb: This gets called a lot.  It was an OSet, but they're quite slow to
   // iterate through so it wasn't a good choice.
   for (i = 0; i < VG_(sizeXA)(fns); i++) {
      fn_ptr = VG_(indexXA)(fns, i);
      if (VG_STREQ(fnname, *fn_ptr))
         return True;
   }
   return False;
}
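
// A hedged usage sketch (hypothetical values, not from the original file):
// after init_alloc_fns() has run,
//
//    Char* fn = "operator new(unsigned long)";
//    tl_assert( is_member_fn(alloc_fns, fn) );
//
// holds, since that name is one of the defaults added by init_alloc_fns().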

//------------------------------------------------------------//
//--- Command line args                                    ---//
//------------------------------------------------------------//

#define MAX_DEPTH    200

typedef enum { TimeI, TimeMS, TimeB } TimeUnit;

static Char* TimeUnit_to_string(TimeUnit time_unit)
{
   switch (time_unit) {
   case TimeI:  return "i";
   case TimeMS: return "ms";
   case TimeB:  return "B";
   default:     tl_assert2(0, "TimeUnit_to_string: unrecognised TimeUnit");
   }
}

static Bool   clo_heap            = True;
// clo_heap_admin is deliberately a word-sized type.  At one point it was
// a UInt, but this caused problems on 64-bit machines when it was
// multiplied by a small negative number and then promoted to a
// word-sized type -- it ended up with a value of 4.2 billion.  Sigh.
static SSizeT clo_heap_admin      = 8;
static Bool   clo_pages_as_heap   = False;
static Bool   clo_stacks          = False;
static Int    clo_depth           = 30;
static double clo_threshold       = 1.0;   // percentage
static double clo_peak_inaccuracy = 1.0;   // percentage
static Int    clo_time_unit       = TimeI;
static Int    clo_detailed_freq   = 10;
static Int    clo_max_snapshots   = 100;
static Char*  clo_massif_out_file = "massif.out.%p";

static XArray* args_for_massif;

static Bool ms_process_cmd_line_option(Char* arg)
{
   Char* tmp_str;

   // Remember the arg for later use.
   VG_(addToXA)(args_for_massif, &arg);

        if VG_BOOL_CLO(arg, "--heap",           clo_heap)   {}
   else if VG_BINT_CLO(arg, "--heap-admin",     clo_heap_admin, 0, 1024) {}

   else if VG_BOOL_CLO(arg, "--stacks",         clo_stacks) {}

   else if VG_BOOL_CLO(arg, "--pages-as-heap",  clo_pages_as_heap) {}

   else if VG_BINT_CLO(arg, "--depth",          clo_depth, 1, MAX_DEPTH) {}

   else if VG_STR_CLO(arg, "--alloc-fn",        tmp_str) {
      VG_(addToXA)(alloc_fns, &tmp_str);
   }
   else if VG_STR_CLO(arg, "--ignore-fn",       tmp_str) {
      VG_(addToXA)(ignore_fns, &tmp_str);
   }

   else if VG_DBL_CLO(arg, "--threshold",       clo_threshold) {
      if (clo_threshold < 0 || clo_threshold > 100) {
         VG_(fmsg_bad_option)(arg,
            "--threshold must be between 0.0 and 100.0\n");
      }
   }

   else if VG_DBL_CLO(arg, "--peak-inaccuracy", clo_peak_inaccuracy) {}

   else if VG_XACT_CLO(arg, "--time-unit=i",    clo_time_unit, TimeI)  {}
   else if VG_XACT_CLO(arg, "--time-unit=ms",   clo_time_unit, TimeMS) {}
   else if VG_XACT_CLO(arg, "--time-unit=B",    clo_time_unit, TimeB)  {}

   else if VG_BINT_CLO(arg, "--detailed-freq",  clo_detailed_freq, 1, 1000000) {}

   else if VG_BINT_CLO(arg, "--max-snapshots",  clo_max_snapshots, 10, 1000) {}

   else if VG_STR_CLO(arg, "--massif-out-file", clo_massif_out_file) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void ms_print_usage(void)
{
   VG_(printf)(
"    --heap=no|yes             profile heap blocks [yes]\n"
"    --heap-admin=<size>       average admin bytes per heap block;\n"
"                              ignored if --heap=no [8]\n"
"    --stacks=no|yes           profile stack(s) [no]\n"
"    --pages-as-heap=no|yes    profile memory at the page level [no]\n"
"    --depth=<number>          depth of contexts [30]\n"
"    --alloc-fn=<name>         specify <name> as an alloc function [empty]\n"
"    --ignore-fn=<name>        ignore heap allocations within <name> [empty]\n"
"    --threshold=<m.n>         significance threshold, as a percentage [1.0]\n"
"    --peak-inaccuracy=<m.n>   maximum peak inaccuracy, as a percentage [1.0]\n"
"    --time-unit=i|ms|B        time unit: instructions executed, milliseconds\n"
"                              or heap bytes alloc'd/dealloc'd [i]\n"
"    --detailed-freq=<N>       every Nth snapshot should be detailed [10]\n"
"    --max-snapshots=<N>       maximum number of snapshots recorded [100]\n"
"    --massif-out-file=<file>  output file name [massif.out.%%p]\n"
   );
}

static void ms_print_debug_usage(void)
{
   VG_(printf)(
"    (none)\n"
   );
}

//------------------------------------------------------------//
//--- XPts, XTrees and XCons                               ---//
//------------------------------------------------------------//

// An XPt represents an "execution point", ie. a code address.  Each XPt is
// part of a tree of XPts (an "execution tree", or "XTree").  The details of
// the heap are represented by a single XTree.
//
// The root of the tree is 'alloc_xpt', which represents all allocation
// functions, eg:
// - malloc/calloc/realloc/memalign/new/new[];
// - user-specified allocation functions (using --alloc-fn);
// - custom allocation (MALLOCLIKE) points
// It's a bit of a fake XPt (ie. its 'ip' is zero), and is only used because
// it makes the code simpler.
//
// Any child of 'alloc_xpt' is called a "top-XPt".  The XPts at the bottom
// of an XTree (leaf nodes) are "bottom-XPTs".
//
// Each path from a top-XPt to a bottom-XPt through an XTree gives an
// execution context ("XCon"), ie. a stack trace.  (And sub-paths represent
// stack sub-traces.)  The number of XCons in an XTree is equal to the
// number of bottom-XPTs in that XTree.
//
//      alloc_xpt       XTrees are bi-directional.
//        | ^
//        v |
//     > parent <       Example: if child1() calls parent() and child2()
//    /    |     \      also calls parent(), and parent() calls malloc(),
//   |    / \     |     the XTree will look like this.
//   |   v   ^    |
//   v  |     |   v
//  child1   child2
//
// (Note that malformed stack traces can lead to difficulties.  See the
// comment at the bottom of get_XCon.)
//
// XTrees and XPts are mirrored by SXTrees and SXPts, where the 'S' is short
// for "saved".  When the XTree is duplicated for a snapshot, we duplicate
// it as an SXTree, which is similar but omits some things it does not need,
// and aggregates up insignificant nodes.  This is important as an SXTree is
// typically much smaller than an XTree.
//
// XXX: make XPt and SXPt extensible arrays, to avoid having to do two
// allocations per Pt.

typedef struct _XPt XPt;
struct _XPt {
   Addr  ip;              // code address

   // Bottom-XPts: space for the precise context.
   // Other XPts: space of all the descendent bottom-XPts.
   // Nb: this value goes up and down as the program executes.
   SizeT szB;

   XPt*  parent;          // pointer to parent XPt

   // n_children and max_children are 32-bit integers.  16-bit integers
   // are too small -- a very big program might have more than 65536
   // allocation points (ie. top-XPts) -- Konqueror starting up has 1800.
   UInt  n_children;      // number of children
   UInt  max_children;    // capacity of children array
   XPt** children;        // pointers to children XPts
};

typedef enum { SigSXPt, InsigSXPt } SXPtTag;

typedef struct _SXPt SXPt;
struct _SXPt {
   SXPtTag tag;
   SizeT szB;              // memory size for the node, be it Sig or Insig
   union {
      // An SXPt representing a single significant code location.  Much like
      // an XPt, minus the fields that aren't necessary.
      struct { Addr ip; UInt n_children; SXPt** children; } Sig;

      // An SXPt representing one or more code locations, all below the
      // significance threshold.
      struct { Int n_xpts; /* number of aggregated XPts */ } Insig;
   };
};

// Fake XPt representing all allocation functions like malloc().  Acts as
// parent node to all top-XPts.
static XPt* alloc_xpt;

// Cheap allocation for blocks that never need to be freed.  Saves about 10%
// for Konqueror startup with --depth=40.
static void* perm_malloc(SizeT n_bytes)
{
   static Addr hp     = 0;    // current heap pointer
   static Addr hp_lim = 0;    // maximum usable byte in current block

   #define SUPERBLOCK_SIZE  (1 << 20)  // 1 MB

   if (hp + n_bytes > hp_lim) {
      hp = (Addr)VG_(am_shadow_alloc)(SUPERBLOCK_SIZE);
      if (0 == hp)
         VG_(out_of_memory_NORETURN)( "massif:perm_malloc",
                                      SUPERBLOCK_SIZE );
      hp_lim = hp + SUPERBLOCK_SIZE - 1;
   }

   hp += n_bytes;
   return (void*)(hp - n_bytes);
}

static XPt* new_XPt(Addr ip, XPt* parent)
{
   // XPts are never freed, so we can use perm_malloc to allocate them.
   // Note that we cannot use perm_malloc for the 'children' array, because
   // that needs to be resizable.
   XPt* xpt = perm_malloc(sizeof(XPt));
   xpt->ip  = ip;
   xpt->szB = 0;
   xpt->parent = parent;

   // We don't initially allocate any space for children.  We let that
   // happen on demand.  Many XPts (ie. all the bottom-XPts) don't have any
   // children anyway.
   xpt->n_children   = 0;
   xpt->max_children = 0;
   xpt->children     = NULL;

   // Update statistics.
   n_xpts++;

   return xpt;
}

static void add_child_xpt(XPt* parent, XPt* child)
{
   // Expand 'children' if necessary.
   tl_assert(parent->n_children <= parent->max_children);
   if (parent->n_children == parent->max_children) {
      if (0 == parent->max_children) {
         parent->max_children = 4;
         parent->children = VG_(malloc)( "ms.main.acx.1",
                                         parent->max_children * sizeof(XPt*) );
         n_xpt_init_expansions++;
      } else {
         parent->max_children *= 2;    // Double size
         parent->children = VG_(realloc)( "ms.main.acx.2",
                                          parent->children,
                                          parent->max_children * sizeof(XPt*) );
         n_xpt_later_expansions++;
      }
   }

   // Insert new child XPt in parent's children list.
   parent->children[ parent->n_children++ ] = child;
}

// Reverse comparison for a reverse sort -- biggest to smallest.
static Int SXPt_revcmp_szB(void* n1, void* n2)
{
   SXPt* sxpt1 = *(SXPt**)n1;
   SXPt* sxpt2 = *(SXPt**)n2;
   return ( sxpt1->szB < sxpt2->szB ?  1
          : sxpt1->szB > sxpt2->szB ? -1
          :                            0 );
}
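
// This comparator is shaped for VG_(ssort).  A hedged sketch of sorting a
// Sig SXPt's children biggest-first with it (assuming 'sxpt' is a SigSXPt;
// the snapshot printer does this kind of sort when writing output):
//
//    VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children,
//               sizeof(SXPt*), SXPt_revcmp_szB);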

//------------------------------------------------------------//
//--- XTree Operations                                     ---//
//------------------------------------------------------------//

// Duplicates an XTree as an SXTree.
static SXPt* dup_XTree(XPt* xpt, SizeT total_szB)
{
   Int   i, n_sig_children, n_insig_children, n_child_sxpts;
   SizeT sig_child_threshold_szB;
   SXPt* sxpt;

   // Number of XPt children  Action for SXPT
   // -----------------       ---------------
   // 0 sig, 0 insig          alloc 0 children
   // N sig, 0 insig          alloc N children, dup all
   // N sig, M insig          alloc N+1, dup first N, aggregate remaining M
   // 0 sig, M insig          alloc 1, aggregate M
   //
   // Work out how big a child must be to be significant.  If the current
   // total_szB is zero, then we set it to 1, which means everything will be
   // judged insignificant -- this is sensible, as there's no point showing
   // any detail for this case.  Unless they used --threshold=0, in which
   // case we show them everything because that's what they asked for.
   //
   // Nb: We do this once now, rather than once per child, because if we do
   // that the cost of all the divisions adds up to something significant.
   if (0 == total_szB && 0 != clo_threshold) {
      sig_child_threshold_szB = 1;
   } else {
      sig_child_threshold_szB = (SizeT)((total_szB * clo_threshold) / 100);
   }
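
   // Worked example: if total_szB is 10,000 and the default --threshold=1.0
   // is in effect, then sig_child_threshold_szB is (10000 * 1.0) / 100
   // == 100, so any child accounting for fewer than 100 bytes will be
   // aggregated into the single Insig child below.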

   // How many children are significant?  And do we need an aggregate SXPt?
   n_sig_children = 0;
   for (i = 0; i < xpt->n_children; i++) {
      if (xpt->children[i]->szB >= sig_child_threshold_szB) {
         n_sig_children++;
      }
   }
   n_insig_children = xpt->n_children - n_sig_children;
   n_child_sxpts = n_sig_children + ( n_insig_children > 0 ? 1 : 0 );

   // Duplicate the XPt.
   sxpt = VG_(malloc)("ms.main.dX.1", sizeof(SXPt));
   n_sxpt_allocs++;
   sxpt->tag    = SigSXPt;
   sxpt->szB    = xpt->szB;
   sxpt->Sig.ip = xpt->ip;
   sxpt->Sig.n_children = n_child_sxpts;

   // Create the SXPt's children.
   if (n_child_sxpts > 0) {
      Int j;
      SizeT sig_children_szB = 0, insig_children_szB = 0;
      sxpt->Sig.children = VG_(malloc)("ms.main.dX.2",
                                       n_child_sxpts * sizeof(SXPt*));

      // Duplicate the significant children.  (Nb: sig_children_szB +
      // insig_children_szB doesn't necessarily equal xpt->szB.)
      j = 0;
      for (i = 0; i < xpt->n_children; i++) {
         if (xpt->children[i]->szB >= sig_child_threshold_szB) {
            sxpt->Sig.children[j++] = dup_XTree(xpt->children[i], total_szB);
            sig_children_szB   += xpt->children[i]->szB;
         } else {
            insig_children_szB += xpt->children[i]->szB;
         }
      }

      // Create the SXPt for the insignificant children, if any, and put it
      // in the last child entry.
      if (n_insig_children > 0) {
         // Nb: We 'n_sxpt_allocs++' here because creating an Insig SXPt
         // doesn't involve a call to dup_XTree().
         SXPt* insig_sxpt = VG_(malloc)("ms.main.dX.3", sizeof(SXPt));
         n_sxpt_allocs++;
         insig_sxpt->tag = InsigSXPt;
         insig_sxpt->szB = insig_children_szB;
         insig_sxpt->Insig.n_xpts = n_insig_children;
         sxpt->Sig.children[n_sig_children] = insig_sxpt;
      }
   } else {
      sxpt->Sig.children = NULL;
   }

   return sxpt;
}

static void free_SXTree(SXPt* sxpt)
{
   Int i;
   tl_assert(sxpt != NULL);

   switch (sxpt->tag) {
   case SigSXPt:
      // Free all children SXPts, then the children array.
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         free_SXTree(sxpt->Sig.children[i]);
         sxpt->Sig.children[i] = NULL;
      }
      VG_(free)(sxpt->Sig.children);  sxpt->Sig.children = NULL;
      break;

   case InsigSXPt:
      break;

   default: tl_assert2(0, "free_SXTree: unknown SXPt tag");
   }

   // Free the SXPt itself.
   VG_(free)(sxpt);  sxpt = NULL;
   n_sxpt_frees++;
}

// Sanity checking: we periodically check the heap XTree with
// ms_expensive_sanity_check.
static void sanity_check_XTree(XPt* xpt, XPt* parent)
{
   tl_assert(xpt != NULL);

   // Check back-pointer.
   tl_assert2(xpt->parent == parent,
      "xpt->parent = %p, parent = %p\n", xpt->parent, parent);

   // Check children counts look sane.
   tl_assert(xpt->n_children <= xpt->max_children);

   // Unfortunately, xpt's size is not necessarily equal to the sum of xpt's
   // children's sizes.  See comment at the bottom of get_XCon.
}

// Sanity checking: we check SXTrees (which are in snapshots) after
// snapshots are created, before they are deleted, and before they are
// printed.
static void sanity_check_SXTree(SXPt* sxpt)
{
   Int i;

   tl_assert(sxpt != NULL);

   // Check the sum of any children szBs equals the SXPt's szB.  Check the
   // children at the same time.
   switch (sxpt->tag) {
   case SigSXPt: {
      if (sxpt->Sig.n_children > 0) {
         SizeT children_sum_szB = 0;
         for (i = 0; i < sxpt->Sig.n_children; i++) {
            sanity_check_SXTree(sxpt->Sig.children[i]);
            children_sum_szB += sxpt->Sig.children[i]->szB;
         }
         tl_assert(children_sum_szB == sxpt->szB);
      }
      break;
   }
   case InsigSXPt:
      break;         // do nothing

   default: tl_assert2(0, "sanity_check_SXTree: unknown SXPt tag");
   }
}

//------------------------------------------------------------//
//--- XCon Operations                                      ---//
//------------------------------------------------------------//

// This is the limit on the number of removed alloc-fns that can be in a
// single stack trace.
#define MAX_OVERESTIMATE   50
#define MAX_IPS            (MAX_DEPTH + MAX_OVERESTIMATE)

// This is used for various buffers which can hold function names/IP
// description.  Some C++ names can get really long so 1024 isn't big
// enough.
#define BUF_LEN            2048

// Determine if the given IP belongs to a function that should be ignored.
static Bool fn_should_be_ignored(Addr ip)
{
   static Char buf[BUF_LEN];
   return
      ( VG_(get_fnname)(ip, buf, BUF_LEN) && is_member_fn(ignore_fns, buf)
      ? True : False );
}

// Get the stack trace for an XCon, filtering out uninteresting entries:
// alloc-fns and entries above alloc-fns, and entries below main-or-below-main.
//   Eg:       alloc-fn1 / alloc-fn2 / a / b / main / (below main) / c
//   becomes:  a / b / main
// Nb: it's possible to end up with an empty trace, eg. if 'main' is marked
// as an alloc-fn.  This is ok.
static
Int get_IPs( ThreadId tid, Bool exclude_first_entry, Addr ips[])
{
   static Char buf[BUF_LEN];
   Int  n_ips, i, n_alloc_fns_removed;
   Int  overestimate;
   Bool redo;

   // We ask for a few more IPs than clo_depth suggests we need.  Then we
   // remove every entry that is an alloc-fn.  Depending on the
   // circumstances, we may need to redo it all, asking for more IPs.
   //
   // - If the original stack trace is smaller than asked-for, redo=False
   // - Else if after filtering we have >= clo_depth IPs, redo=False
   // - Else redo=True
   //
   // In other words, to redo, we'd have to get a stack trace as big as we
   // asked for and remove more than 'overestimate' alloc-fns.

   redo = True;      // Assume this to begin with.
   for (overestimate = 3; redo; overestimate += 6) {
      // This should never happen -- would require MAX_OVERESTIMATE
      // alloc-fns to be removed from the stack trace.
      if (overestimate > MAX_OVERESTIMATE)
         VG_(tool_panic)("get_IPs: ips[] too small, inc. MAX_OVERESTIMATE?");

      // Ask for more IPs than clo_depth suggests we need.
      n_ips = VG_(get_StackTrace)( tid, ips, clo_depth + overestimate,
                                   NULL/*array to dump SP values in*/,
                                   NULL/*array to dump FP values in*/,
                                   0/*first_ip_delta*/ );
      tl_assert(n_ips > 0);

      // If the original stack trace is smaller than asked-for, redo=False.
      if (n_ips < clo_depth + overestimate) { redo = False; }

      // Filter out alloc fns.  If requested, we automatically remove the
      // first entry (which presumably will be something like malloc or
      // __builtin_new that we're sure to filter out) without looking at it,
      // because VG_(get_fnname) is expensive.
      n_alloc_fns_removed = ( exclude_first_entry ? 1 : 0 );
      for (i = n_alloc_fns_removed; i < n_ips; i++) {
         if (VG_(get_fnname)(ips[i], buf, BUF_LEN)) {
            if (is_member_fn(alloc_fns, buf)) {
               n_alloc_fns_removed++;
            } else {
               break;
            }
         }
      }

      // Remove the alloc fns by shuffling the rest down over them.
      n_ips -= n_alloc_fns_removed;
      for (i = 0; i < n_ips; i++) {
         ips[i] = ips[i + n_alloc_fns_removed];
      }

      // If after filtering we have >= clo_depth IPs, redo=False
      if (n_ips >= clo_depth) {
         redo = False;
         n_ips = clo_depth;      // Ignore any IPs below --depth.
      }

      if (redo) {
         n_XCon_redos++;
      }
   }

   return n_ips;
}

// Gets an XCon and puts it in the tree.  Returns the XCon's bottom-XPt.
// Unless the allocation should be ignored, in which case we return NULL.
static XPt* get_XCon( ThreadId tid, Bool exclude_first_entry )
{
   static Addr ips[MAX_IPS];
   Int  i;
   XPt* xpt = alloc_xpt;

   // After this call, the IPs we want are in ips[0]..ips[n_ips-1].
   Int n_ips = get_IPs(tid, exclude_first_entry, ips);

   // Should we ignore this allocation?  (Nb: n_ips can be zero, eg. if
   // 'main' is marked as an alloc-fn.)
   if (n_ips > 0 && fn_should_be_ignored(ips[0])) {
      return NULL;
   }

   // Now do the search/insertion of the XCon.
   for (i = 0; i < n_ips; i++) {
      Addr ip = ips[i];
      Int  ch;

      // Look for IP in xpt's children.
      // Linear search, ugh -- about 10% of time for konqueror startup.
      // Tried caching the last result, but it only hit about 4% of the time
      // for konqueror.
      // Nb: this search hits about 98% of the time for konqueror.
      for (ch = 0; True; ch++) {
         if (ch == xpt->n_children) {
            // IP not found in the children.
            // Create and add new child XPt, then stop.
            XPt* new_child_xpt = new_XPt(ip, xpt);
            add_child_xpt(xpt, new_child_xpt);
            xpt = new_child_xpt;
            break;

         } else if (ip == xpt->children[ch]->ip) {
            // Found the IP in the children, stop.
            xpt = xpt->children[ch];
            break;
         }
      }
   }

   // [Note: several comments refer to this comment.  Do not delete it
   //  without updating them.]
   //
   // A complication...  If all stack traces were well-formed, then the
   // returned xpt would always be a bottom-XPt.  As a consequence, an XPt's
   // size would always be equal to the sum of its children's sizes, which
   // is an excellent sanity check.
   //
   // Unfortunately, stack traces occasionally are malformed, ie. truncated.
   // This allows a stack trace to be a sub-trace of another, eg. a/b/c is a
   // sub-trace of a/b/c/d.  So we can't assume this xpt is a bottom-XPt;
   // nor can we sanity-check an XPt's size against its children's sizes.
   // This is annoying, but must be dealt with.  (Older versions of Massif
   // had this assertion in, and it was reported to fail by real users a
   // couple of times.)  Even more annoyingly, I can't come up with a simple
   // test case that exhibits such a malformed stack trace, so I can't
   // regression test it.  Sigh.
   //
   // However, we can print a warning, so that if it happens (unexpectedly)
   // in existing regression tests we'll know.  Also, it warns users that
   // the output snapshots may not add up the way they might expect.
   //
   //tl_assert(0 == xpt->n_children); // Must be bottom-XPt
   if (0 != xpt->n_children) {
      static Int n_moans = 0;
      if (n_moans < 3) {
         VG_(umsg)(
            "Warning: Malformed stack trace detected.  In Massif's output,\n");
         VG_(umsg)(
            "         the size of an entry's child entries may not sum up\n");
         VG_(umsg)(
            "         to the entry's size as they normally do.\n");
         n_moans++;
         if (3 == n_moans)
            VG_(umsg)(
               "         (And Massif now won't warn about this again.)\n");
      }
   }
   return xpt;
}

// Update 'szB' of every XPt in the XCon, by percolating upwards.
static void update_XCon(XPt* xpt, SSizeT space_delta)
{
   tl_assert(clo_heap);
   tl_assert(NULL != xpt);

   if (0 == space_delta)
      return;

   while (xpt != alloc_xpt) {
      if (space_delta < 0) tl_assert(xpt->szB >= -space_delta);
      xpt->szB += space_delta;
      xpt = xpt->parent;
   }
   if (space_delta < 0) tl_assert(alloc_xpt->szB >= -space_delta);
   alloc_xpt->szB += space_delta;
}
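
// Worked example: if a 100-byte block is allocated with the XCon
// main -> f -> g (g's XPt being the bottom-XPt returned by get_XCon), then
// update_XCon(g_xpt, 100) adds 100 to the szB of g's XPt, f's XPt, main's
// XPt, and finally alloc_xpt.  Freeing the block later percolates -100 up
// the same path.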

//------------------------------------------------------------//
//--- Snapshots                                            ---//
//------------------------------------------------------------//

// Snapshots are done in a way so that we always have a reasonable number of
// them.  We start by taking them quickly.  Once we hit our limit, we cull
// some (eg. half), and start taking them more slowly.  Once we hit the
// limit again, we again cull and then take them even more slowly, and so
// on.

// Time is measured either in i or ms or bytes, depending on the --time-unit
// option.  It's a Long because it can exceed 32-bits reasonably easily, and
// because we need to allow negative values to represent unset times.
typedef Long Time;

#define UNUSED_SNAPSHOT_TIME  -333  // A conspicuous negative number.

typedef enum { Normal, Peak, Unused } SnapshotKind;

typedef
   struct {
      SnapshotKind kind;
      Time  time;
      SizeT heap_szB;
      SizeT heap_extra_szB;// Heap slop + admin bytes.
      SizeT stacks_szB;
      SXPt* alloc_sxpt;    // Heap XTree root, if a detailed snapshot,
   }                       // otherwise NULL.
   Snapshot;

static UInt      next_snapshot_i = 0;  // Index of where next snapshot will go.
static Snapshot* snapshots;            // Array of snapshots.

static Bool is_snapshot_in_use(Snapshot* snapshot)
{
   if (Unused == snapshot->kind) {
      // If snapshot is unused, check all the fields are unset.
      tl_assert(snapshot->time == UNUSED_SNAPSHOT_TIME);
      tl_assert(snapshot->heap_extra_szB == 0);
      tl_assert(snapshot->heap_szB == 0);
      tl_assert(snapshot->stacks_szB == 0);
      tl_assert(snapshot->alloc_sxpt == NULL);
      return False;
   } else {
      tl_assert(snapshot->time != UNUSED_SNAPSHOT_TIME);
      return True;
   }
}

static Bool is_detailed_snapshot(Snapshot* snapshot)
{
   return (snapshot->alloc_sxpt ? True : False);
}

static Bool is_uncullable_snapshot(Snapshot* snapshot)
{
   return &snapshots[0] == snapshot                   // First snapshot
       || &snapshots[next_snapshot_i-1] == snapshot   // Last snapshot
       || snapshot->kind == Peak;                     // Peak snapshot
}

static void sanity_check_snapshot(Snapshot* snapshot)
{
   if (snapshot->alloc_sxpt) {
      sanity_check_SXTree(snapshot->alloc_sxpt);
   }
}

// All the used entries should look used, all the unused ones should be clear.
static void sanity_check_snapshots_array(void)
{
   Int i;
   for (i = 0; i < next_snapshot_i; i++) {
      tl_assert( is_snapshot_in_use( & snapshots[i] ));
   }
   for (    ; i < clo_max_snapshots; i++) {
      tl_assert(!is_snapshot_in_use( & snapshots[i] ));
   }
}

// This zeroes all the fields in the snapshot, but does not free the heap
// XTree if present.  It also does a sanity check unless asked not to;  we
// can't sanity check at startup when clearing the initial snapshots because
// they're full of junk.
static void clear_snapshot(Snapshot* snapshot, Bool do_sanity_check)
{
   if (do_sanity_check) sanity_check_snapshot(snapshot);
   snapshot->kind           = Unused;
   snapshot->time           = UNUSED_SNAPSHOT_TIME;
   snapshot->heap_extra_szB = 0;
   snapshot->heap_szB       = 0;
   snapshot->stacks_szB     = 0;
   snapshot->alloc_sxpt     = NULL;
}

// This zeroes all the fields in the snapshot, and frees the heap XTree if
// present.
static void delete_snapshot(Snapshot* snapshot)
{
   // Nb: if there's an XTree, we free it after calling clear_snapshot,
   // because clear_snapshot does a sanity check which includes checking the
   // XTree.
   SXPt* tmp_sxpt = snapshot->alloc_sxpt;
   clear_snapshot(snapshot, /*do_sanity_check*/True);
   if (tmp_sxpt) {
      free_SXTree(tmp_sxpt);
   }
}

static void VERB_snapshot(Int verbosity, Char* prefix, Int i)
{
   Snapshot* snapshot = &snapshots[i];
   Char* suffix;
   switch (snapshot->kind) {
   case Peak:   suffix = "p";                                            break;
   case Normal: suffix = ( is_detailed_snapshot(snapshot) ? "d" : "." ); break;
   case Unused: suffix = "u";                                            break;
   default:
      tl_assert2(0, "VERB_snapshot: unknown snapshot kind: %d", snapshot->kind);
   }
   VERB(verbosity, "%s S%s%3d (t:%lld, hp:%ld, ex:%ld, st:%ld)\n",
      prefix, suffix, i,
      snapshot->time,
      snapshot->heap_szB,
      snapshot->heap_extra_szB,
      snapshot->stacks_szB
   );
}

// Cull half the snapshots;  we choose those that represent the smallest
// time-spans, because that gives us the most even distribution of snapshots
// over time.  (It's possible to lose interesting spikes, however.)
//
// Algorithm for N snapshots:  We find the snapshot representing the smallest
// timeframe, and remove it.  We repeat this until (N/2) snapshots are gone.
// We have to do this one snapshot at a time, rather than finding the (N/2)
// smallest snapshots in one hit, because when a snapshot is removed, its
// neighbours immediately cover greater timespans.  So it's O(N^2), but N is
// small, and it's not done very often.
//
// Once we're done, we return the new smallest interval between snapshots.
// That becomes our minimum time interval.
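//
// Worked example: with snapshots at times 0, 10, 12, 30 and 40, the
// snapshot at t=10 covers the smallest timespan (d(0,10) + d(10,12) = 12,
// vs 20 for t=12 and 28 for t=30), so it is culled first; once it is gone,
// t=12's timespan grows to d(0,12) + d(12,30) = 30.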
static Time cull_snapshots(void)
{
   Int  i, jp, j, jn, min_timespan_i;
   Int  n_deleted = 0;
   Time min_timespan;

   n_cullings++;

   // Sets j to the index of the first not-yet-removed snapshot at or after i
   #define FIND_SNAPSHOT(i, j) \
      for (j = i; \
           j < clo_max_snapshots && !is_snapshot_in_use(&snapshots[j]); \
           j++) { }
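
   // For example, FIND_SNAPSHOT(0, j) leaves j == 0 if snapshots[0] is
   // still in use, and otherwise advances j to the first in-use slot (or
   // to clo_max_snapshots if none remain).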

   VERB(2, "Culling...\n");

   // First we remove enough snapshots by clearing them in-place.  Once
   // that's done, we can slide the remaining ones down.
   for (i = 0; i < clo_max_snapshots/2; i++) {
      // Find the snapshot representing the smallest timespan.  The timespan
      // for snapshot n = d(N-1,N)+d(N,N+1), where d(A,B) is the time between
      // snapshot A and B.  We don't consider the first and last snapshots for
      // removal.
      Snapshot* min_snapshot;
      Int min_j;

      // Initial triple: (prev, curr, next) == (jp, j, jn)
      // Initial min_timespan is the first one.
      jp = 0;
      FIND_SNAPSHOT(1,   j);
      FIND_SNAPSHOT(j+1, jn);
      min_timespan = 0x7fffffffffffffffLL;
      min_j        = -1;
      while (jn < clo_max_snapshots) {
         Time timespan = snapshots[jn].time - snapshots[jp].time;
         tl_assert(timespan >= 0);
         // Nb: We never cull the peak snapshot.
         if (Peak != snapshots[j].kind && timespan < min_timespan) {
            min_timespan = timespan;
            min_j        = j;
         }
         // Move on to next triple
         jp = j;
         j  = jn;
         FIND_SNAPSHOT(jn+1, jn);
      }
      // We've found the least important snapshot, now delete it.  First
      // print it if necessary.
      tl_assert(-1 != min_j);    // Check we found a minimum.
      min_snapshot = & snapshots[ min_j ];
      if (VG_(clo_verbosity) > 1) {
         Char buf[64];
         VG_(snprintf)(buf, 64, " %3d (t-span = %lld)", i, min_timespan);
         VERB_snapshot(2, buf, min_j);
      }
      delete_snapshot(min_snapshot);
      n_deleted++;
   }

   // Slide down the remaining snapshots over the removed ones.  First set i
   // to point to the first empty slot, and j to the first full slot after
   // i.  Then slide everything down.
   for (i = 0;  is_snapshot_in_use( &snapshots[i] ); i++) { }
   for (j = i; !is_snapshot_in_use( &snapshots[j] ); j++) { }
   for (    ; j < clo_max_snapshots; j++) {
      if (is_snapshot_in_use( &snapshots[j] )) {
         snapshots[i++] = snapshots[j];
         clear_snapshot(&snapshots[j], /*do_sanity_check*/True);
      }
   }
   next_snapshot_i = i;

   // Check snapshots array looks ok after changes.
   sanity_check_snapshots_array();

   // Find the minimum timespan remaining;  that will be our new minimum
   // time interval.  Note that above we were finding timespans by measuring
   // two intervals around a snapshot that was under consideration for
   // deletion.  Here we only measure single intervals because all the
   // deletions have occurred.
   //
   // But we have to be careful -- some snapshots (eg. snapshot 0, and the
   // peak snapshot) are uncullable.  If two uncullable snapshots end up
   // next to each other, they'll never be culled (assuming the peak doesn't
   // change), and the time gap between them will not change.  However, the
   // time between the remaining cullable snapshots will grow ever larger.
   // This means that the min_timespan found will always be that between the
   // two uncullable snapshots, and it will be much smaller than it should
   // be.  To avoid this problem, when computing the minimum timespan, we
   // ignore any timespans between two uncullable snapshots.
   tl_assert(next_snapshot_i > 1);
   min_timespan = 0x7fffffffffffffffLL;
   min_timespan_i = -1;
   for (i = 1; i < next_snapshot_i; i++) {
      if (is_uncullable_snapshot(&snapshots[i]) &&
          is_uncullable_snapshot(&snapshots[i-1]))
      {
         VERB(2, "(Ignoring interval %d--%d when computing minimum)\n", i-1, i);
      } else {
         Time timespan = snapshots[i].time - snapshots[i-1].time;
         tl_assert(timespan >= 0);
         if (timespan < min_timespan) {
            min_timespan   = timespan;
            min_timespan_i = i;
         }
      }
   }
   tl_assert(-1 != min_timespan_i);    // Check we found a minimum.

   // Print remaining snapshots, if necessary.
   if (VG_(clo_verbosity) > 1) {
      VERB(2, "Finished culling (%3d of %3d deleted)\n",
         n_deleted, clo_max_snapshots);
      for (i = 0; i < next_snapshot_i; i++) {
         VERB_snapshot(2, "  post-cull", i);
      }
      VERB(2, "New time interval = %lld (between snapshots %d and %d)\n",
         min_timespan, min_timespan_i-1, min_timespan_i);
   }

   return min_timespan;
}

static Time get_time(void)
{
   // Get current time, in whatever time unit we're using.
   if (clo_time_unit == TimeI) {
      return guest_instrs_executed;
   } else if (clo_time_unit == TimeMS) {
      // Some stuff happens between the millisecond timer being initialised
      // to zero and us taking our first snapshot.  We determine that time
      // gap so we can subtract it from all subsequent times so that our
      // first snapshot is considered to be at t = 0ms.  Unfortunately, a
      // bunch of symbols get read after the first snapshot is taken but
      // before the second one (which is triggered by the first allocation),
      // so when the time-unit is 'ms' we always have a big gap between the
      // first two snapshots.  But at least users won't have to wonder why
      // the first snapshot isn't at t=0.
      static Bool is_first_get_time = True;
      static Time start_time_ms;
      if (is_first_get_time) {
         start_time_ms = VG_(read_millisecond_timer)();
         is_first_get_time = False;
         return 0;
      } else {
         return VG_(read_millisecond_timer)() - start_time_ms;
      }
   } else if (clo_time_unit == TimeB) {
      return total_allocs_deallocs_szB;
   } else {
      tl_assert2(0, "bad --time-unit value");
   }
}

// Take a snapshot, and only that -- decisions on whether to take a
// snapshot, or what kind of snapshot, are made elsewhere.
// Nb: we call the arg "my_time" because "time" shadows a global declaration
// in /usr/include/time.h on Darwin.
static void
take_snapshot(Snapshot* snapshot, SnapshotKind kind, Time my_time,
              Bool is_detailed)
{
   tl_assert(!is_snapshot_in_use(snapshot));
   if (!clo_pages_as_heap) {
      tl_assert(have_started_executing_code);
   }

   // Heap and heap admin.
   if (clo_heap) {
      snapshot->heap_szB = heap_szB;
      if (is_detailed) {
         SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
         snapshot->alloc_sxpt = dup_XTree(alloc_xpt, total_szB);
         tl_assert(           alloc_xpt->szB == heap_szB);
         tl_assert(snapshot->alloc_sxpt->szB == heap_szB);
      }
      snapshot->heap_extra_szB = heap_extra_szB;
   }

   // Stacks.
   if (clo_stacks) {
      snapshot->stacks_szB = stacks_szB;
   }

   // Rest of snapshot.
   snapshot->kind = kind;
   snapshot->time = my_time;
   sanity_check_snapshot(snapshot);

   // Update stats.
   if (Peak == kind) n_peak_snapshots++;
   if (is_detailed)  n_detailed_snapshots++;
   n_real_snapshots++;
}

// Take a snapshot, if it's time, or if we've hit a peak.
static void
maybe_take_snapshot(SnapshotKind kind, Char* what)
{
   // 'min_time_interval' is the minimum time interval between snapshots.
   // If we try to take a snapshot and less than this much time has passed,
   // we don't take it.  It gets larger as the program runs longer.  It's
   // initialised to zero so that we begin by taking snapshots as quickly as
   // possible.
   static Time min_time_interval = 0;
   // Zero allows startup snapshot.
   static Time earliest_possible_time_of_next_snapshot = 0;
   static Int  n_snapshots_since_last_detailed         = 0;
   static Int  n_skipped_snapshots_since_last_snapshot = 0;

   Snapshot* snapshot;
   Bool      is_detailed;

   // Nb: we call this variable "my_time" because "time" shadows a global
   // declaration in /usr/include/time.h on Darwin.
   Time      my_time = get_time();

   switch (kind) {
    case Normal:
      // Only do a snapshot if it's time.
      if (my_time < earliest_possible_time_of_next_snapshot) {
         n_skipped_snapshots++;
         n_skipped_snapshots_since_last_snapshot++;
         return;
      }
      is_detailed = (clo_detailed_freq-1 == n_snapshots_since_last_detailed);
      break;

    case Peak: {
      // Because we're about to do a deallocation, we're coming down from a
      // local peak.  If it is (a) actually a global peak, and (b) a certain
      // amount bigger than the previous peak, then we take a peak snapshot.
      // By not taking a snapshot for every peak, we save a lot of effort --
      // because many peaks remain peak only for a short time.
      SizeT total_szB = heap_szB + heap_extra_szB + stacks_szB;
      SizeT excess_szB_for_new_peak =
         (SizeT)((peak_snapshot_total_szB * clo_peak_inaccuracy) / 100);
      if (total_szB <= peak_snapshot_total_szB + excess_szB_for_new_peak) {
         return;
      }
      is_detailed = True;
      break;
    }

    default:
      tl_assert2(0, "maybe_take_snapshot: unrecognised snapshot kind");
   }
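
   // Worked example for the Peak case above: with the default
   // --peak-inaccuracy=1.0 and a recorded peak of 1,000,000 bytes, the
   // excess is (1,000,000 * 1.0) / 100 = 10,000 bytes, so a new peak
   // snapshot is only taken once the total exceeds 1,010,000 bytes.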

   // Take the snapshot.
   snapshot = & snapshots[next_snapshot_i];
   take_snapshot(snapshot, kind, my_time, is_detailed);

   // Record if it was detailed.
   if (is_detailed) {
      n_snapshots_since_last_detailed = 0;
   } else {
      n_snapshots_since_last_detailed++;
   }

   // Update peak data, if it's a Peak snapshot.
   if (Peak == kind) {
      Int i, number_of_peaks_snapshots_found = 0;

      // Sanity check the size, then update our recorded peak.
      SizeT snapshot_total_szB =
         snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
      tl_assert2(snapshot_total_szB > peak_snapshot_total_szB,
         "%ld, %ld\n", snapshot_total_szB, peak_snapshot_total_szB);
      peak_snapshot_total_szB = snapshot_total_szB;

      // Find the old peak snapshot, if it exists, and mark it as normal.
      for (i = 0; i < next_snapshot_i; i++) {
         if (Peak == snapshots[i].kind) {
            snapshots[i].kind = Normal;
            number_of_peaks_snapshots_found++;
         }
      }
      tl_assert(number_of_peaks_snapshots_found <= 1);
   }

   // Finish up verbosity and stats stuff.
   if (n_skipped_snapshots_since_last_snapshot > 0) {
      VERB(2, "  (skipped %d snapshot%s)\n",
         n_skipped_snapshots_since_last_snapshot,
         ( 1 == n_skipped_snapshots_since_last_snapshot ? "" : "s") );
   }
   VERB_snapshot(2, what, next_snapshot_i);
   n_skipped_snapshots_since_last_snapshot = 0;

   // Cull the entries, if our snapshot table is full.
   next_snapshot_i++;
   if (clo_max_snapshots == next_snapshot_i) {
      min_time_interval = cull_snapshots();
   }

   // Work out the earliest time when the next snapshot can happen.
   earliest_possible_time_of_next_snapshot = my_time + min_time_interval;
}

//------------------------------------------------------------//
//--- Sanity checking                                      ---//
//------------------------------------------------------------//

static Bool ms_cheap_sanity_check ( void )
{
   return True;   // Nothing useful we can cheaply check.
}

static Bool ms_expensive_sanity_check ( void )
{
   sanity_check_XTree(alloc_xpt, /*parent*/NULL);
   sanity_check_snapshots_array();
   return True;
}

//------------------------------------------------------------//
//--- Heap management                                      ---//
//------------------------------------------------------------//

// Metadata for heap blocks.  Each one contains a pointer to a bottom-XPt,
// which is a foothold into the XCon at which it was allocated.  From
// HP_Chunks, XPt 'space' fields are incremented (at allocation) and
// decremented (at deallocation).
//
// Nb: first two fields must match core's VgHashNode.
typedef
   struct _HP_Chunk {
      struct _HP_Chunk* next;
      Addr              data;       // Ptr to actual block
      SizeT             req_szB;    // Size requested
      SizeT             slop_szB;   // Extra bytes given above those requested
      XPt*              where;      // Where allocated; bottom-XPt
   }
   HP_Chunk;

static VgHashTable malloc_list  = NULL;   // HP_Chunks

static void update_alloc_stats(SSizeT szB_delta)
{
   // Update total_allocs_deallocs_szB.
   if (szB_delta < 0) szB_delta = -szB_delta;
   total_allocs_deallocs_szB += szB_delta;
}
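
// Note the absolute value above: with --time-unit=B, allocations and
// deallocations both advance "time".  For example, a malloc(100) followed
// by its matching free advances total_allocs_deallocs_szB by roughly 200
// (plus the admin and slop bytes counted via update_heap_stats).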

static void update_heap_stats(SSizeT heap_szB_delta, Int heap_extra_szB_delta)
{
   if (heap_szB_delta < 0)
      tl_assert(heap_szB >= -heap_szB_delta);
   if (heap_extra_szB_delta < 0)
      tl_assert(heap_extra_szB >= -heap_extra_szB_delta);

   heap_extra_szB += heap_extra_szB_delta;
   heap_szB       += heap_szB_delta;

   update_alloc_stats(heap_szB_delta + heap_extra_szB_delta);
}

static
void* record_block( ThreadId tid, void* p, SizeT req_szB, SizeT slop_szB,
                    Bool exclude_first_entry, Bool maybe_snapshot )
{
   // Make new HP_Chunk node, add to malloc_list
   HP_Chunk* hc = VG_(malloc)("ms.main.rb.1", sizeof(HP_Chunk));
   hc->req_szB  = req_szB;
   hc->slop_szB = slop_szB;
   hc->data     = (Addr)p;
   hc->where    = NULL;
   VG_(HT_add_node)(malloc_list, hc);

   if (clo_heap) {
      VERB(3, "<<< record_block (%lu, %lu)\n", req_szB, slop_szB);

      hc->where = get_XCon( tid, exclude_first_entry );

      if (hc->where) {
         // Update statistics.
         n_heap_allocs++;

         // Update heap stats.
         update_heap_stats(req_szB, clo_heap_admin + slop_szB);

         // Update XTree.
         update_XCon(hc->where, req_szB);

         // Maybe take a snapshot.
         if (maybe_snapshot) {
            maybe_take_snapshot(Normal, " alloc");
         }
      } else {
         // Ignored allocation.
         n_ignored_heap_allocs++;

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>>\n");
   }

   return p;
}
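
// Worked example of the accounting above: a malloc(100) that the allocator
// rounds up to 104 bytes, with the default --heap-admin=8, reaches
// record_block with req_szB == 100 and slop_szB == 4, so heap_szB grows by
// 100 and heap_extra_szB by 8 + 4 == 12.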

static
void* alloc_and_record_block ( ThreadId tid, SizeT req_szB, SizeT req_alignB,
                               Bool is_zeroed )
{
   SizeT actual_szB, slop_szB;
   void* p;

   if ((SSizeT)req_szB < 0) return NULL;

   // Allocate and zero if necessary.
   p = VG_(cli_malloc)( req_alignB, req_szB );
   if (!p) {
      return NULL;
   }
   if (is_zeroed) VG_(memset)(p, 0, req_szB);
   actual_szB = VG_(malloc_usable_size)(p);
   tl_assert(actual_szB >= req_szB);
   slop_szB = actual_szB - req_szB;

   // Record block.
   record_block(tid, p, req_szB, slop_szB, /*exclude_first_entry*/True,
                /*maybe_snapshot*/True);

   return p;
}

static
void unrecord_block ( void* p, Bool maybe_snapshot )
{
   // Remove HP_Chunk from malloc_list
   HP_Chunk* hc = VG_(HT_remove)(malloc_list, (UWord)p);
   if (NULL == hc) {
      return;   // must have been a bogus free()
   }

   if (clo_heap) {
      VERB(3, "<<< unrecord_block\n");

      if (hc->where) {
         // Update statistics.
         n_heap_frees++;

         // Maybe take a peak snapshot, since it's a deallocation.
         if (maybe_snapshot) {
            maybe_take_snapshot(Peak, "de-PEAK");
         }

         // Update heap stats.
         update_heap_stats(-hc->req_szB, -clo_heap_admin - hc->slop_szB);

         // Update XTree.
         update_XCon(hc->where, -hc->req_szB);

         // Maybe take a snapshot.
         if (maybe_snapshot) {
            maybe_take_snapshot(Normal, "dealloc");
         }
      } else {
         n_ignored_heap_frees++;

         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>> (-%lu, -%lu)\n", hc->req_szB, hc->slop_szB);
   }

   // Actually free the chunk, and the heap block (if necessary)
   VG_(free)( hc );  hc = NULL;
}

// Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
// ignored, but the realloc is not requested to be ignored, and we are
// shrinking the block, then we have to ignore the realloc -- otherwise we
// could end up with negative heap sizes.  This isn't a danger if we are
// growing such a block, but for consistency (it also simplifies things) we
// ignore such reallocs as well.
static
void* realloc_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
{
   HP_Chunk* hc;
   void*     p_new;
   SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
   XPt      *old_where, *new_where;
   Bool      is_ignored = False;

   // Remove the old block
   hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
   if (hc == NULL) {
      return NULL;   // must have been a bogus realloc()
   }

   old_req_szB  = hc->req_szB;
   old_slop_szB = hc->slop_szB;

   tl_assert(!clo_pages_as_heap);  // Shouldn't be here if --pages-as-heap=yes.
   if (clo_heap) {
      VERB(3, "<<< realloc_block (%lu)\n", new_req_szB);

      if (hc->where) {
         // Update statistics.
         n_heap_reallocs++;

         // Maybe take a peak snapshot, if it's (effectively) a deallocation.
         if (new_req_szB < old_req_szB) {
            maybe_take_snapshot(Peak, "re-PEAK");
         }
      } else {
         // The original malloc was ignored, so we have to ignore the
         // realloc as well.
         is_ignored = True;
      }
   }

   // Actually do the allocation, if necessary.
   if (new_req_szB <= old_req_szB + old_slop_szB) {
      // New size is smaller or same; block not moved.
      p_new = p_old;
      new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);

   } else {
      // New size is bigger; make new block, copy shared contents, free old.
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
      if (!p_new) {
         // Nb: if realloc fails, NULL is returned but the old block is not
         // touched.  What an awful function.
         return NULL;
      }
      VG_(memcpy)(p_new, p_old, old_req_szB);
      VG_(cli_free)(p_old);
      new_actual_szB = VG_(malloc_usable_size)(p_new);
      tl_assert(new_actual_szB >= new_req_szB);
      new_slop_szB = new_actual_szB - new_req_szB;
   }

   if (p_new) {
      // Update HP_Chunk.
      hc->data     = (Addr)p_new;
      hc->req_szB  = new_req_szB;
      hc->slop_szB = new_slop_szB;
      old_where    = hc->where;
      hc->where    = NULL;

      // Update XTree.
      if (clo_heap) {
         new_where = get_XCon( tid, /*exclude_first_entry*/True);
         if (!is_ignored && new_where) {
            hc->where = new_where;
            update_XCon(old_where, -old_req_szB);
            update_XCon(new_where,  new_req_szB);
         } else {
            // The realloc itself is ignored.
            is_ignored = True;

            // Update statistics.
            n_ignored_heap_reallocs++;
         }
      }
   }

   // Now insert the new hc (with a possibly new 'data' field) into
   // malloc_list.  If this realloc() did not increase the memory size, we
   // will have removed and then re-added hc unnecessarily.  But that's ok
   // because shrinking a block with realloc() is (presumably) much rarer
   // than growing it, and this way simplifies the growing case.
   VG_(HT_add_node)(malloc_list, hc);

   if (clo_heap) {
      if (!is_ignored) {
         // Update heap stats.
         update_heap_stats(new_req_szB - old_req_szB,
                           new_slop_szB - old_slop_szB);

         // Maybe take a snapshot.
         maybe_take_snapshot(Normal, "realloc");
      } else {
         VERB(3, "(ignored)\n");
      }

      VERB(3, ">>> (%ld, %ld)\n",
         new_req_szB - old_req_szB, new_slop_szB - old_slop_szB);
   }

   return p_new;
}
//------------------------------------------------------------//
//--- malloc() et al replacement wrappers                  ---//
//------------------------------------------------------------//

static void* ms_malloc ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}

static void* ms___builtin_new ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}

static void* ms___builtin_vec_new ( ThreadId tid, SizeT szB )
{
   return alloc_and_record_block( tid, szB, VG_(clo_alignment), /*is_zeroed*/False );
}

static void* ms_calloc ( ThreadId tid, SizeT m, SizeT szB )
{
   return alloc_and_record_block( tid, m*szB, VG_(clo_alignment), /*is_zeroed*/True );
}

static void *ms_memalign ( ThreadId tid, SizeT alignB, SizeT szB )
{
   return alloc_and_record_block( tid, szB, alignB, False );
}

static void ms_free ( ThreadId tid __attribute__((unused)), void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
   VG_(cli_free)(p);
}

static void ms___builtin_delete ( ThreadId tid, void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
   VG_(cli_free)(p);
}

static void ms___builtin_vec_delete ( ThreadId tid, void* p )
{
   unrecord_block(p, /*maybe_snapshot*/True);
   VG_(cli_free)(p);
}

static void* ms_realloc ( ThreadId tid, void* p_old, SizeT new_szB )
{
   return realloc_block(tid, p_old, new_szB);
}

static SizeT ms_malloc_usable_size ( ThreadId tid, void* p )
{
   HP_Chunk* hc = VG_(HT_lookup)( malloc_list, (UWord)p );

   return ( hc ? hc->req_szB + hc->slop_szB : 0 );
}
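
// (Illustrative aside: the usable size is request plus slop, so if the
// client calls malloc(10) and the low-level allocator hands back a 16-byte
// block, the chunk is recorded with req_szB == 10 and slop_szB == 6, and
// ms_malloc_usable_size() reports 16.)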
//------------------------------------------------------------//
//--- Page handling                                        ---//
//------------------------------------------------------------//

static
void ms_record_page_mem ( Addr a, SizeT len )
{
   ThreadId tid = VG_(get_running_tid)();
   Addr end;
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   tl_assert(len >= VKI_PAGE_SIZE);
   // Record the first N-1 pages as blocks, but don't do any snapshots.
   for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
      record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
                    /*exclude_first_entry*/False, /*maybe_snapshot*/False );
   }
   // Record the last page as a block, and maybe do a snapshot afterwards.
   record_block( tid, (void*)a, VKI_PAGE_SIZE, /*slop_szB*/0,
                 /*exclude_first_entry*/False, /*maybe_snapshot*/True );
}
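
// (Illustrative aside: with 4KB pages, a 12KB mapping at 0x100000 is thus
// recorded as three separate 4KB blocks at 0x100000, 0x101000 and 0x102000.
// Keeping one block per page means a later partial munmap() of any
// page-aligned subrange can be unrecorded page by page.)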
static
void ms_unrecord_page_mem( Addr a, SizeT len )
{
   Addr end;
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   tl_assert(len >= VKI_PAGE_SIZE);
   for (end = a + len - VKI_PAGE_SIZE; a < end; a += VKI_PAGE_SIZE) {
      unrecord_block((void*)a, /*maybe_snapshot*/False);
   }
   unrecord_block((void*)a, /*maybe_snapshot*/True);
}
//------------------------------------------------------------//
//--- mmap()/munmap()/brk() handling                       ---//
//------------------------------------------------------------//

static
void ms_new_mem_mmap ( Addr a, SizeT len,
                       Bool rr, Bool ww, Bool xx, ULong di_handle )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_record_page_mem(a, len);
}

static
void ms_new_mem_startup( Addr a, SizeT len,
                         Bool rr, Bool ww, Bool xx, ULong di_handle )
{
   // Startup maps are always page-sized, except the trampoline page is
   // marked by the core as only being the size of the trampoline itself,
   // which is something like 57 bytes.  Round it up to page size.
   len = VG_PGROUNDUP(len);
   ms_record_page_mem(a, len);
}

static
void ms_new_mem_brk ( Addr a, SizeT len, ThreadId tid )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_record_page_mem(a, len);
}

static
void ms_copy_mem_remap( Addr from, Addr to, SizeT len)
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(from, len);
   ms_record_page_mem(to, len);
}

static
void ms_die_mem_munmap( Addr a, SizeT len )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(a, len);
}

static
void ms_die_mem_brk( Addr a, SizeT len )
{
   tl_assert(VG_IS_PAGE_ALIGNED(len));
   ms_unrecord_page_mem(a, len);
}
//------------------------------------------------------------//
//--- Stacks                                               ---//
//------------------------------------------------------------//

// We really want the inlining to occur...
#define INLINE    inline __attribute__((always_inline))

static void update_stack_stats(SSizeT stack_szB_delta)
{
   if (stack_szB_delta < 0) tl_assert(stacks_szB >= -stack_szB_delta);
   stacks_szB += stack_szB_delta;

   update_alloc_stats(stack_szB_delta);
}

static INLINE void new_mem_stack_2(SizeT len, Char* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< new_mem_stack (%ld)\n", len);
      n_stack_allocs++;
      update_stack_stats(len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}

static INLINE void die_mem_stack_2(SizeT len, Char* what)
{
   if (have_started_executing_code) {
      VERB(3, "<<< die_mem_stack (%ld)\n", -len);
      n_stack_frees++;
      maybe_take_snapshot(Peak, "stkPEAK");
      update_stack_stats(-len);
      maybe_take_snapshot(Normal, what);
      VERB(3, ">>>\n");
   }
}

static void new_mem_stack(Addr a, SizeT len)
{
   new_mem_stack_2(len, "stk-new");
}

static void die_mem_stack(Addr a, SizeT len)
{
   die_mem_stack_2(len, "stk-die");
}

static void new_mem_stack_signal(Addr a, SizeT len, ThreadId tid)
{
   new_mem_stack_2(len, "sig-new");
}

static void die_mem_stack_signal(Addr a, SizeT len)
{
   die_mem_stack_2(len, "sig-die");
}
//------------------------------------------------------------//
//--- Client Requests                                      ---//
//------------------------------------------------------------//

static void print_monitor_help ( void )
{
   VG_(gdb_printf) ("\n");
   VG_(gdb_printf) ("massif monitor commands:\n");
   VG_(gdb_printf) ("  ms.snapshot [<filename>]\n");
   VG_(gdb_printf) ("  ms.detailed_snapshot [<filename>]\n");
   VG_(gdb_printf) ("      takes a snapshot (or a detailed snapshot)\n");
   VG_(gdb_printf) ("      and saves it in <filename>\n");
   VG_(gdb_printf) ("      default <filename> is massif.vgdb.out\n");
   VG_(gdb_printf) ("\n");
}
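
// (Illustrative aside: these commands can be issued from a gdb attached
// through vgdb, e.g.
//    (gdb) monitor ms.snapshot snap.out
// or from a shell while the program runs under Massif:
//    vgdb ms.detailed_snapshot
// Both routes end up in handle_gdb_monitor_command() below.)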
/* Forward declaration; returns True if the request was recognised,
   False otherwise. */
static Bool handle_gdb_monitor_command (ThreadId tid, Char *req);
static Bool ms_handle_client_request ( ThreadId tid, UWord* argv, UWord* ret )
{
   switch (argv[0]) {
   case VG_USERREQ__MALLOCLIKE_BLOCK: {
      void* p   = (void*)argv[1];
      SizeT szB =        argv[2];
      record_block( tid, p, szB, /*slop_szB*/0, /*exclude_first_entry*/False,
                    /*maybe_snapshot*/True );
      *ret = 0;
      return True;
   }
   case VG_USERREQ__RESIZEINPLACE_BLOCK: {
      void* p        = (void*)argv[1];
      SizeT newSizeB =        argv[3];
      unrecord_block(p, /*maybe_snapshot*/True);
      record_block(tid, p, newSizeB, /*slop_szB*/0,
                   /*exclude_first_entry*/False, /*maybe_snapshot*/True);
      *ret = 0;
      return True;
   }
   case VG_USERREQ__FREELIKE_BLOCK: {
      void* p = (void*)argv[1];
      unrecord_block(p, /*maybe_snapshot*/True);
      *ret = 0;
      return True;
   }
   case VG_USERREQ__GDB_MONITOR_COMMAND: {
      Bool handled = handle_gdb_monitor_command (tid, (Char*)argv[1]);
      if (handled)
         *ret = 1;
      else
         *ret = 0;
      return handled;
   }
   default:
      *ret = 0;
      return False;
   }
}
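
// (Illustrative client-side sketch, not tool code: a custom pool allocator
// can feed these requests to Massif with the standard macros from
// valgrind.h.  'my_pool_alloc'/'my_pool_free' are hypothetical names.
//
//    #include <valgrind/valgrind.h>
//
//    void* p = my_pool_alloc(pool, 100);
//    VALGRIND_MALLOCLIKE_BLOCK(p, 100, /*rzB*/0, /*is_zeroed*/0);
//    ...
//    VALGRIND_FREELIKE_BLOCK(p, /*rzB*/0);
//    my_pool_free(pool, p);
//
// Each macro expands to a client request that arrives in the handler above
// as VG_USERREQ__MALLOCLIKE_BLOCK / VG_USERREQ__FREELIKE_BLOCK.)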
//------------------------------------------------------------//
//--- Instrumentation                                      ---//
//------------------------------------------------------------//

static void add_counter_update(IRSB* sbOut, Int n)
{
   #if defined(VG_BIGENDIAN)
   # define END Iend_BE
   #elif defined(VG_LITTLEENDIAN)
   # define END Iend_LE
   #else
   # error "Unknown endianness"
   #endif
   // Add code to increment 'guest_instrs_executed' by 'n', like this:
   //   WrTmp(t1, Load64(&guest_instrs_executed))
   //   WrTmp(t2, Add64(RdTmp(t1), Const(n)))
   //   Store(&guest_instrs_executed, t2)
   IRTemp t1 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRTemp t2 = newIRTemp(sbOut->tyenv, Ity_I64);
   IRExpr* counter_addr = mkIRExpr_HWord( (HWord)&guest_instrs_executed );

   IRStmt* st1 = IRStmt_WrTmp(t1, IRExpr_Load(END, Ity_I64, counter_addr));
   IRStmt* st2 =
      IRStmt_WrTmp(t2,
                   IRExpr_Binop(Iop_Add64, IRExpr_RdTmp(t1),
                                IRExpr_Const(IRConst_U64(n))));
   IRStmt* st3 = IRStmt_Store(END, counter_addr, IRExpr_RdTmp(t2));

   addStmtToIRSB( sbOut, st1 );
   addStmtToIRSB( sbOut, st2 );
   addStmtToIRSB( sbOut, st3 );
}
static IRSB* ms_instrument2( IRSB* sbIn )
{
   Int   i, n = 0;
   IRSB* sbOut;

   // We increment the instruction count in two places:
   // - just before any Ist_Exit statements;
   // - just before the IRSB's end.
   // In the former case, we zero 'n' and then continue instrumenting.

   sbOut = deepCopyIRSBExceptStmts(sbIn);

   for (i = 0; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];

      if (!st || st->tag == Ist_NoOp) continue;

      if (st->tag == Ist_IMark) {
         n++;
      } else if (st->tag == Ist_Exit) {
         if (n > 0) {
            // Add an increment before the Exit statement, then reset 'n'.
            add_counter_update(sbOut, n);
            n = 0;
         }
      }
      addStmtToIRSB( sbOut, st );
   }

   if (n > 0) {
      // Add an increment before the SB end.
      add_counter_update(sbOut, n);
   }
   return sbOut;
}
static
IRSB* ms_instrument ( VgCallbackClosure* closure,
                      IRSB* sbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   if (! have_started_executing_code) {
      // Do an initial sample to guarantee that we have at least one.
      // We use 'maybe_take_snapshot' instead of 'take_snapshot' to ensure
      // 'maybe_take_snapshot's internal static variables are initialised.
      have_started_executing_code = True;
      maybe_take_snapshot(Normal, "startup");
   }

   if      (clo_time_unit == TimeI)  { return ms_instrument2(sbIn); }
   else if (clo_time_unit == TimeMS) { return sbIn; }
   else if (clo_time_unit == TimeB)  { return sbIn; }
   else    { tl_assert2(0, "bad --time-unit value"); }
}
//------------------------------------------------------------//
//--- Writing snapshots                                    ---//
//------------------------------------------------------------//

Char FP_buf[BUF_LEN];

// XXX: implement f{,n}printf in m_libcprint.c eventually, and use it here.
// Then change Cachegrind to use it too.
#define FP(format, args...) ({ \
   VG_(snprintf)(FP_buf, BUF_LEN, format, ##args); \
   FP_buf[BUF_LEN-1] = '\0';  /* Make sure the string is terminated. */ \
   VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf)); \
})
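
// (Illustrative aside: FP is deliberately a macro rather than a function --
// it picks up whatever 'fd' is in scope at the call site.  So inside
// pp_snapshot() below, FP("time=%lld\n", snapshot->time) formats into
// FP_buf and writes the result straight to that snapshot file's
// descriptor.)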
// Nb: uses a static buffer, each call trashes the last string returned.
static Char* make_perc(double x)
{
   static Char mbuf[32];

   VG_(percentify)((ULong)(x * 100), 10000, 2, 6, mbuf);
   // XXX: this is bogus if the denominator was zero -- resulting string is
   // something like "0 --%".
   if (' ' == mbuf[0]) mbuf[0] = '0';
   return mbuf;
}
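
// (Illustrative aside: the scaling cancels out so that make_perc(x)
// renders x itself as a percentage -- x is multiplied by 100 here and
// VG_(percentify) divides by the 10000 denominator again.  So
// make_perc(1.0), as used for the default threshold, should yield
// "01.00%": width 6, two decimal places, leading pad space overwritten
// by '0'.)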
static void pp_snapshot_SXPt(Int fd, SXPt* sxpt, Int depth, Char* depth_str,
                             Int depth_str_len,
                             SizeT snapshot_heap_szB, SizeT snapshot_total_szB)
{
   Int   i, j, n_insig_children_sxpts;
   SXPt* child = NULL;

   // Used for printing function names.  Is made static to keep it out
   // of the stack frame -- this function is recursive.  Obviously this
   // now means its contents are trashed across the recursive call.
   static Char ip_desc_array[BUF_LEN];
   Char* ip_desc = ip_desc_array;

   switch (sxpt->tag) {
    case SigSXPt:
      // Print the SXPt itself.
      if (0 == depth) {
         ip_desc =
            ( clo_pages_as_heap
            ? "(page allocation syscalls) mmap/mremap/brk, --alloc-fns, etc."
            : "(heap allocation functions) malloc/new/new[], --alloc-fns, etc."
            );
         // XXX: --alloc-fns?
      } else {
         // If it's main-or-below-main, we (if appropriate) ignore everything
         // below it by pretending it has no children.
         if ( ! VG_(clo_show_below_main) ) {
            Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(sxpt->Sig.ip);
            if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
               sxpt->Sig.n_children = 0;
            }
         }

         // We need the -1 to get the line number right.  (Presumably
         // because the IP is a return address, which points just past the
         // call instruction.)
         ip_desc = VG_(describe_IP)(sxpt->Sig.ip-1, ip_desc, BUF_LEN);
      }

      // Do the non-ip_desc part first...
      FP("%sn%d: %lu ", depth_str, sxpt->Sig.n_children, sxpt->szB);

      // For ip_descs beginning with "0xABCD...:" addresses, we first
      // measure the length of the "0xabcd: " address at the start of the
      // ip_desc.
      j = 0;
      if ('0' == ip_desc[0] && 'x' == ip_desc[1]) {
         j = 2;
         while (True) {
            if (ip_desc[j]) {
               if (':' == ip_desc[j]) break;
               j++;
            } else {
               tl_assert2(0, "ip_desc has unexpected form: %s\n", ip_desc);
            }
         }
      }

      // Nb: We treat this specially (ie. we don't use FP) so that if the
      // ip_desc is too long (eg. due to a long C++ function name), it'll
      // get truncated, but the '\n' is still there so it's a valid file.
      // (At one point we were truncating without adding the '\n', which
      // caused bug #155929.)
      //
      // Also, we account for the length of the address in ip_desc when
      // truncating.  (The longest address we could have is 18 chars: "0x"
      // plus 16 address digits.)  This ensures that the truncated function
      // name always has the same length, which makes truncation
      // deterministic and thus makes testing easier.
      VG_(snprintf)(FP_buf, BUF_LEN, "%s\n", ip_desc);
      FP_buf[BUF_LEN-18+j-5] = '.';    // "..." at the end make the
      FP_buf[BUF_LEN-18+j-4] = '.';    //   truncation more obvious.
      FP_buf[BUF_LEN-18+j-3] = '.';
      FP_buf[BUF_LEN-18+j-2] = '\n';   // The last char is '\n'.
      FP_buf[BUF_LEN-18+j-1] = '\0';   // The string is terminated.
      VG_(write)(fd, (void*)FP_buf, VG_(strlen)(FP_buf));

      // Extend depth_str for the children.
      tl_assert(depth+1 < depth_str_len-1);    // -1 for end NUL char
      depth_str[depth+0] = ' ';
      depth_str[depth+1] = '\0';

      // Sort SXPt's children by szB (reverse order: biggest to smallest).
      // Nb: we sort them here, rather than earlier (eg. in dup_XTree), for
      // two reasons.  First, if we do it during dup_XTree, it can get
      // expensive (eg. 15% of execution time for konqueror
      // startup/shutdown).  Second, this way we get the Insig SXPt (if one
      // is present) in its sorted position, not at the end.
      VG_(ssort)(sxpt->Sig.children, sxpt->Sig.n_children, sizeof(SXPt*),
                 SXPt_revcmp_szB);

      // Print the SXPt's children.  They should already be in sorted order.
      n_insig_children_sxpts = 0;
      for (i = 0; i < sxpt->Sig.n_children; i++) {
         child = sxpt->Sig.children[i];

         if (InsigSXPt == child->tag)
            n_insig_children_sxpts++;

         // Ok, print the child.  NB: contents of ip_desc_array will be
         // trashed by this recursive call.  Doesn't matter currently,
         // but worth noting.
         pp_snapshot_SXPt(fd, child, depth+1, depth_str, depth_str_len,
                          snapshot_heap_szB, snapshot_total_szB);
      }

      // Reset depth_str.
      depth_str[depth+0] = '\0';
      depth_str[depth+1] = '\0';

      // There should be 0 or 1 Insig children SXPts.
      tl_assert(n_insig_children_sxpts <= 1);
      break;

    case InsigSXPt: {
      Char* s = ( 1 == sxpt->Insig.n_xpts ? "," : "s, all" );
      FP("%sn0: %lu in %d place%s below massif's threshold (%s)\n",
         depth_str, sxpt->szB, sxpt->Insig.n_xpts, s,
         make_perc(clo_threshold));
      break;
    }

    default:
      tl_assert2(0, "pp_snapshot_SXPt: unrecognised SXPt tag");
   }
}
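
// (Illustrative aside: the lines emitted above are what ms_print consumes.
// A small detailed tree might look like
//
//    n2: 2000 (heap allocation functions) malloc/new/new[], --alloc-fns, etc.
//     n0: 1600 0x80483F2: main (foo.c:10)
//     n0: 400 in 3 places, all below massif's threshold (01.00%)
//
// where each line is "n<num-children>: <bytes> <description>" and the
// leading spaces come from depth_str.  The byte counts are made up for the
// example.)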
static void pp_snapshot(Int fd, Snapshot* snapshot, Int snapshot_n)
{
   sanity_check_snapshot(snapshot);

   FP("#-----------\n");
   FP("snapshot=%d\n", snapshot_n);
   FP("#-----------\n");
   FP("time=%lld\n",            snapshot->time);
   FP("mem_heap_B=%lu\n",       snapshot->heap_szB);
   FP("mem_heap_extra_B=%lu\n", snapshot->heap_extra_szB);
   FP("mem_stacks_B=%lu\n",     snapshot->stacks_szB);

   if (is_detailed_snapshot(snapshot)) {
      // Detailed snapshot -- print heap tree.
      Int   depth_str_len = clo_depth + 3;
      Char* depth_str = VG_(malloc)("ms.main.pps.1",
                                    sizeof(Char) * depth_str_len);
      SizeT snapshot_total_szB =
         snapshot->heap_szB + snapshot->heap_extra_szB + snapshot->stacks_szB;
      depth_str[0] = '\0';   // Initialise depth_str to "".

      FP("heap_tree=%s\n", ( Peak == snapshot->kind ? "peak" : "detailed" ));
      pp_snapshot_SXPt(fd, snapshot->alloc_sxpt, 0, depth_str,
                       depth_str_len, snapshot->heap_szB,
                       snapshot_total_szB);

      VG_(free)(depth_str);
   } else {
      FP("heap_tree=empty\n");
   }
}
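
// (Illustrative aside: for a plain snapshot the block produced above is
// just, e.g.
//
//    #-----------
//    snapshot=3
//    #-----------
//    time=1029910
//    mem_heap_B=20000
//    mem_heap_extra_B=80
//    mem_stacks_B=1024
//    heap_tree=empty
//
// with "heap_tree=peak" or "heap_tree=detailed" plus the printed tree for
// the detailed kinds.  The numbers are made up for the example.)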
static void write_snapshots_to_file(Char* massif_out_file,
                                    Snapshot snapshots_array[],
                                    Int nr_elements)
{
   Int i, fd;
   SysRes sres;

   sres = VG_(open)(massif_out_file, VKI_O_CREAT|VKI_O_TRUNC|VKI_O_WRONLY,
                    VKI_S_IRUSR|VKI_S_IWUSR);
   if (sr_isError(sres)) {
      // If the file can't be opened for whatever reason (conflict
      // between multiple massif'd processes?), give up now.
      VG_(umsg)("error: can't open output file '%s'\n", massif_out_file );
      VG_(umsg)("       ... so profiling results will be missing.\n");
      return;
   } else {
      fd = sr_Res(sres);
   }

   // Print massif-specific options that were used.
   // XXX: is it worth having a "desc:" line?  Could just call it "options:"
   // -- this file format isn't as generic as Cachegrind's, so the
   // implied genericity of "desc:" is bogus.
   FP("desc:");
   for (i = 0; i < VG_(sizeXA)(args_for_massif); i++) {
      Char* arg = *(Char**)VG_(indexXA)(args_for_massif, i);
      FP(" %s", arg);
   }
   if (0 == i) FP(" (none)");
   FP("\n");

   // Print "cmd:" line.
   FP("cmd: ");
   if (VG_(args_the_exename)) {
      FP("%s", VG_(args_the_exename));
      for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
         HChar* arg = * (HChar**) VG_(indexXA)( VG_(args_for_client), i );
         if (arg)
            FP(" %s", arg);
      }
   }
   FP("\n");

   FP("time_unit: %s\n", TimeUnit_to_string(clo_time_unit));

   for (i = 0; i < nr_elements; i++) {
      Snapshot* snapshot = & snapshots_array[i];
      pp_snapshot(fd, snapshot, i);     // Detailed snapshot!
   }
   VG_(close) (fd);
}
static void write_snapshots_array_to_file(void)
{
   // Setup output filename.  Nb: it's important to do this now, ie. as late
   // as possible.  If we do it at start-up and the program forks and the
   // output file format string contains a %p (pid) specifier, both the
   // parent and child will incorrectly write to the same file;  this
   // happened in 3.3.0.
   Char* massif_out_file =
      VG_(expand_file_name)("--massif-out-file", clo_massif_out_file);
   write_snapshots_to_file (massif_out_file, snapshots, next_snapshot_i);
   VG_(free)(massif_out_file);
}
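
// (Illustrative aside: the default --massif-out-file value is of the form
// massif.out.%p, and the expansion here turns %p into the current pid --
// so a parent with pid 1234 writes massif.out.1234 while a forked child
// gets its own file, avoiding the clobbering described above.)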
static void handle_snapshot_monitor_command (Char *filename, Bool detailed)
{
   Snapshot snapshot;

   clear_snapshot(&snapshot, /* do_sanity_check */ False);
   take_snapshot(&snapshot, Normal, get_time(), detailed);
   write_snapshots_to_file ((filename == NULL) ?
                            (Char*) "massif.vgdb.out" : filename,
                            &snapshot, 1);
   delete_snapshot(&snapshot);
}
static Bool handle_gdb_monitor_command (ThreadId tid, Char *req)
{
   Char* wcmd;
   Char s[VG_(strlen(req)) + 1]; /* copy for strtok_r; +1 for the NUL */
   Char* ssaveptr;

   VG_(strcpy) (s, req);

   wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
   switch (VG_(keyword_id) ("help ms.snapshot ms.detailed_snapshot",
                            wcmd, kwd_report_duplicated_matches)) {
   case -2: /* multiple matches */
      return True;
   case -1: /* not found */
      return False;
   case  0: /* help */
      print_monitor_help();
      return True;
   case  1: { /* ms.snapshot */
      Char* filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
      handle_snapshot_monitor_command (filename, False /* detailed */);
      return True;
   }
   case  2: { /* ms.detailed_snapshot */
      Char* filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
      handle_snapshot_monitor_command (filename, True /* detailed */);
      return True;
   }
   default:
      tl_assert(0);
      return False;
   }
}
//------------------------------------------------------------//
//--- Finalisation                                         ---//
//------------------------------------------------------------//

static void ms_fini(Int exit_status)
{
   // Output.
   write_snapshots_array_to_file();

   // Stats
   tl_assert(n_xpts > 0);  // always have alloc_xpt
   STATS("heap allocs:           %u\n", n_heap_allocs);
   STATS("heap reallocs:         %u\n", n_heap_reallocs);
   STATS("heap frees:            %u\n", n_heap_frees);
   STATS("ignored heap allocs:   %u\n", n_ignored_heap_allocs);
   STATS("ignored heap frees:    %u\n", n_ignored_heap_frees);
   STATS("ignored heap reallocs: %u\n", n_ignored_heap_reallocs);
   STATS("stack allocs:          %u\n", n_stack_allocs);
   STATS("stack frees:           %u\n", n_stack_frees);
   STATS("XPts:                  %u\n", n_xpts);
   STATS("top-XPts:              %u (%d%%)\n",
         alloc_xpt->n_children,
         ( n_xpts ? alloc_xpt->n_children * 100 / n_xpts : 0));
   STATS("XPt init expansions:   %u\n", n_xpt_init_expansions);
   STATS("XPt later expansions:  %u\n", n_xpt_later_expansions);
   STATS("SXPt allocs:           %u\n", n_sxpt_allocs);
   STATS("SXPt frees:            %u\n", n_sxpt_frees);
   STATS("skipped snapshots:     %u\n", n_skipped_snapshots);
   STATS("real snapshots:        %u\n", n_real_snapshots);
   STATS("detailed snapshots:    %u\n", n_detailed_snapshots);
   STATS("peak snapshots:        %u\n", n_peak_snapshots);
   STATS("cullings:              %u\n", n_cullings);
   STATS("XCon redos:            %u\n", n_XCon_redos);
}
//------------------------------------------------------------//
//--- Initialisation                                       ---//
//------------------------------------------------------------//

static void ms_post_clo_init(void)
{
   Int i;
   Char* LD_PRELOAD_val;
   Char* s;
   Char* s2;

   // Check options.
   if (clo_pages_as_heap) {
      if (clo_stacks) {
         VG_(fmsg_bad_option)(
            "--pages-as-heap=yes together with --stacks=yes", "");
      }
   }
   if (!clo_heap) {
      clo_pages_as_heap = False;
   }

   // If --pages-as-heap=yes we don't want malloc replacement to occur.  So we
   // disable vgpreload_massif-$PLATFORM.so by removing it from LD_PRELOAD (or
   // platform-equivalent).  We replace it entirely with spaces because then
   // the linker doesn't complain (it does complain if we just change the name
   // to a bogus file).  This is a bit of a hack, but LD_PRELOAD is set up well
   // before tool initialisation, so this seems the best way to do it.
   if (clo_pages_as_heap) {
      clo_heap_admin = 0;     // No heap admin on pages.

      LD_PRELOAD_val = VG_(getenv)( (Char*)VG_(LD_PRELOAD_var_name) );
      tl_assert(LD_PRELOAD_val);

      // Make sure the vgpreload_core-$PLATFORM entry is there, for sanity.
      s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_core");
      tl_assert(s2);

      // Now find the vgpreload_massif-$PLATFORM entry.
      s2 = VG_(strstr)(LD_PRELOAD_val, "vgpreload_massif");
      tl_assert(s2);

      // Blank out everything to the previous ':', which must be there because
      // of the preceding vgpreload_core-$PLATFORM entry.
      for (s = s2; *s != ':'; s--) {
         *s = ' ';
      }

      // Blank out everything to the end of the entry, which will be '\0' if
      // LD_PRELOAD was empty before Valgrind started, or ':' otherwise.
      for (s = s2; *s != ':' && *s != '\0'; s++) {
         *s = ' ';
      }
   }
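
   // (Illustrative aside: on amd64-linux the variable might initially read
   //    .../vgpreload_core-amd64-linux.so:.../vgpreload_massif-amd64-linux.so
   // and after the blanking above the massif entry is just a run of spaces,
   // which the dynamic linker accepts without complaint.  The library names
   // are real; the paths are made up for the example.)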
   // Print alloc-fns and ignore-fns, if necessary.
   if (VG_(clo_verbosity) > 1) {
      VERB(1, "alloc-fns:\n");
      for (i = 0; i < VG_(sizeXA)(alloc_fns); i++) {
         Char** fn_ptr = VG_(indexXA)(alloc_fns, i);
         VERB(1, "  %s\n", *fn_ptr);
      }

      VERB(1, "ignore-fns:\n");
      if (0 == VG_(sizeXA)(ignore_fns)) {
         VERB(1, "  <empty>\n");
      }
      for (i = 0; i < VG_(sizeXA)(ignore_fns); i++) {
         Char** fn_ptr = VG_(indexXA)(ignore_fns, i);
         VERB(1, "  %d: %s\n", i, *fn_ptr);
      }
   }

   // Events to track.
   if (clo_stacks) {
      VG_(track_new_mem_stack)        ( new_mem_stack        );
      VG_(track_die_mem_stack)        ( die_mem_stack        );
      VG_(track_new_mem_stack_signal) ( new_mem_stack_signal );
      VG_(track_die_mem_stack_signal) ( die_mem_stack_signal );
   }

   if (clo_pages_as_heap) {
      VG_(track_new_mem_startup) ( ms_new_mem_startup );
      VG_(track_new_mem_brk)     ( ms_new_mem_brk     );
      VG_(track_new_mem_mmap)    ( ms_new_mem_mmap    );

      VG_(track_copy_mem_remap)  ( ms_copy_mem_remap  );

      VG_(track_die_mem_brk)     ( ms_die_mem_brk     );
      VG_(track_die_mem_munmap)  ( ms_die_mem_munmap  );
   }

   // Initialise snapshot array, and sanity-check it.
   snapshots = VG_(malloc)("ms.main.mpoci.1",
                           sizeof(Snapshot) * clo_max_snapshots);
   // We don't want to do snapshot sanity checks here, because the snapshots
   // are currently uninitialised.
   for (i = 0; i < clo_max_snapshots; i++) {
      clear_snapshot( & snapshots[i], /*do_sanity_check*/False );
   }
   sanity_check_snapshots_array();
}
static void ms_pre_clo_init(void)
{
   VG_(details_name)            ("Massif");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a heap profiler");
   VG_(details_copyright_author)(
      "Copyright (C) 2003-2010, and GNU GPL'd, by Nicholas Nethercote");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   VG_(details_avg_translation_sizeB) ( 330 );

   // Basic functions.
   VG_(basic_tool_funcs)          (ms_post_clo_init,
                                   ms_instrument,
                                   ms_fini);

   // Needs.
   VG_(needs_libc_freeres)();
   VG_(needs_command_line_options)(ms_process_cmd_line_option,
                                   ms_print_usage,
                                   ms_print_debug_usage);
   VG_(needs_client_requests)     (ms_handle_client_request);
   VG_(needs_sanity_checks)       (ms_cheap_sanity_check,
                                   ms_expensive_sanity_check);
   VG_(needs_malloc_replacement)  (ms_malloc,
                                   ms___builtin_new,
                                   ms___builtin_vec_new,
                                   ms_memalign,
                                   ms_calloc,
                                   ms_free,
                                   ms___builtin_delete,
                                   ms___builtin_vec_delete,
                                   ms_realloc,
                                   ms_malloc_usable_size,
                                   0 );

   // HP_Chunks.
   malloc_list = VG_(HT_construct)( "Massif's malloc list" );

   // Dummy node at top of the context structure.
   alloc_xpt = new_XPt(/*ip*/0, /*parent*/NULL);

   // Initialise alloc_fns and ignore_fns.
   init_alloc_fns();
   init_ignore_fns();

   // Initialise args_for_massif.
   args_for_massif = VG_(newXA)(VG_(malloc), "ms.main.mprci.1",
                                VG_(free), sizeof(HChar*));
}

VG_DETERMINE_INTERFACE_VERSION(ms_pre_clo_init)

//--------------------------------------------------------------------//
//--- end                                                          ---//
//--------------------------------------------------------------------//