2 /*--------------------------------------------------------------------*/
3 /*--- Format-neutral storage of and querying of info acquired from ---*/
4 /*--- ELF/XCOFF stabs/dwarf1/dwarf2/dwarf3 debug info. ---*/
6 /*--------------------------------------------------------------------*/
9 This file is part of Valgrind, a dynamic binary instrumentation
12 Copyright (C) 2000-2010 Julian Seward
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
30 The GNU General Public License is contained in the file COPYING.
33 /* This file manages the data structures built by the debuginfo
34 system. These are: the top level SegInfo list. For each SegInfo,
35 there are tables for address-to-symbol mappings,
36 address-to-src-file/line mappings, and address-to-CFI-info
40 #include "pub_core_basics.h"
41 #include "pub_core_options.h" /* VG_(clo_verbosity) */
42 #include "pub_core_debuginfo.h"
43 #include "pub_core_libcassert.h"
44 #include "pub_core_libcbase.h"
45 #include "pub_core_libcprint.h"
46 #include "pub_core_xarray.h"
47 #include "pub_core_oset.h"
49 #include "priv_misc.h" /* dinfo_zalloc/free/strdup */
50 #include "priv_d3basics.h" /* ML_(pp_GX) */
51 #include "priv_tytypes.h"
52 #include "priv_storage.h" /* self */
55 /*------------------------------------------------------------*/
56 /*--- Misc (printing, errors) ---*/
57 /*------------------------------------------------------------*/
59 /* Show a non-fatal debug info reading error. Use vg_panic if
60 terminal. 'serious' errors are shown regardless of the
/* Report a non-fatal debug-info reading problem.  'serious' errors
   always print a warning plus the offending filename; non-serious
   ones are printed only at verbosity >= 2.
   NOTE(review): this extract is missing source lines (the embedded
   original line numbers are discontinuous), so parts of the control
   flow -- e.g. the XML-mode early exit and the 'serious' test --
   are not visible here.  Confirm against the full file. */
62 void ML_(symerr) ( struct _DebugInfo* di, Bool serious, HChar* msg )
64 /* XML mode hides everything :-( */
70 VG_(message)(Vg_DebugMsg, "WARNING: Serious error when "
71 "reading debug info\n");
/* 'True ||' makes this branch unconditional: for serious errors the
   filename is shown regardless of verbosity. */
72 if (True || VG_(clo_verbosity) < 2) {
73 /* Need to show what the file name is, at verbosity levels 2
74 or below, since that won't already have been shown */
75 VG_(message)(Vg_DebugMsg,
76 "When reading debug info from %s:\n",
/* Fall back to "???" when no DebugInfo / filename is available. */
77 (di && di->filename) ? di->filename : (UChar*)"???");
79 VG_(message)(Vg_DebugMsg, "%s\n", msg);
81 } else { /* !serious */
83 if (VG_(clo_verbosity) >= 2)
84 VG_(message)(Vg_DebugMsg, "%s\n", msg);
/* Print one symbol-table entry: index, start/end address, size and
   name.  NOTE(review): lines are missing from this extract (the
   argument carrying 'idx'/'sym->addr' for the first two specifiers
   is not visible). */
91 void ML_(ppSym) ( Int idx, DiSym* sym )
93 VG_(printf)( "%5d: %#8lx .. %#8lx (%d) %s\n",
96 sym->addr + sym->size - 1, sym->size,
100 /* Print a call-frame-info summary. */
/* Pretty-print one DiCfSI record: its address range, how the CFA is
   computed, and how each architecture's interesting registers are
   recovered.  NOTE(review): this extract has missing lines (closing
   braces of the SHOW_HOW macro arms, several case labels, prints);
   the embedded original line numbers are discontinuous. */
101 void ML_(ppDiCfSI) ( XArray* /* of CfiExpr */ exprs, DiCfSI* si )
/* SHOW_HOW: print one register-recovery rule, dispatching on the
   CFIR_* 'how' tag; CFIR_EXPR indexes into 'exprs'. */
103 # define SHOW_HOW(_how, _off) \
105 if (_how == CFIR_UNKNOWN) { \
106 VG_(printf)("Unknown"); \
108 if (_how == CFIR_SAME) { \
109 VG_(printf)("Same"); \
111 if (_how == CFIR_CFAREL) { \
112 VG_(printf)("cfa+%d", _off); \
114 if (_how == CFIR_MEMCFAREL) { \
115 VG_(printf)("*(cfa+%d)", _off); \
117 if (_how == CFIR_EXPR) { \
119 ML_(ppCfiExpr)(exprs, _off); \
126 VG_(printf)("[%#lx .. %#lx]: ", si->base,
127 si->base + (UWord)si->len - 1);
/* Describe how the CFA (canonical frame address) is derived. */
128 switch (si->cfa_how) {
130 VG_(printf)("let cfa=oldSP+%d", si->cfa_off);
133 VG_(printf)("let cfa=oldBP+%d", si->cfa_off);
135 case CFIC_ARM_R13REL:
136 VG_(printf)("let cfa=oldR13+%d", si->cfa_off);
138 case CFIC_ARM_R12REL:
139 VG_(printf)("let cfa=oldR12+%d", si->cfa_off);
141 case CFIC_ARM_R11REL:
142 VG_(printf)("let cfa=oldR11+%d", si->cfa_off);
145 VG_(printf)("let cfa=Same");
148 VG_(printf)("let cfa=oldR7+%d", si->cfa_off);
151 VG_(printf)("let cfa={");
152 ML_(ppCfiExpr)(exprs, si->cfa_off);
159 VG_(printf)(" in RA=");
160 SHOW_HOW(si->ra_how, si->ra_off);
/* Per-architecture register set: x86/amd64 track SP/BP, arm tracks
   R14/R13/R12/R11/R7, s390x tracks SP/FP. */
161 # if defined(VGA_x86) || defined(VGA_amd64)
163 SHOW_HOW(si->sp_how, si->sp_off);
165 SHOW_HOW(si->bp_how, si->bp_off);
166 # elif defined(VGA_arm)
167 VG_(printf)(" R14=");
168 SHOW_HOW(si->r14_how, si->r14_off);
169 VG_(printf)(" R13=");
170 SHOW_HOW(si->r13_how, si->r13_off);
171 VG_(printf)(" R12=");
172 SHOW_HOW(si->r12_how, si->r12_off);
173 VG_(printf)(" R11=");
174 SHOW_HOW(si->r11_how, si->r11_off);
176 SHOW_HOW(si->r7_how, si->r7_off);
177 # elif defined(VGA_ppc32) || defined(VGA_ppc64)
178 # elif defined(VGA_s390x)
180 SHOW_HOW(si->sp_how, si->sp_off);
182 SHOW_HOW(si->fp_how, si->fp_off);
184 # error "Unknown arch"
191 /*------------------------------------------------------------*/
192 /*--- Adding stuff ---*/
193 /*------------------------------------------------------------*/
195 /* Add a str to the string table, including terminating zero, and
196 return pointer to the string in vg_strtab. Unless it's been seen
197 recently, in which case we find the old pointer and return that.
198 This avoids the most egregious duplications.
200 JSGF: changed from returning an index to a pointer, and changed to
201 a chunking memory allocator rather than reallocating, so the
/* Copy 'str' (of length 'len'; len == -1 presumably means "compute
   it" -- the guard is among the missing lines, TODO confirm) plus a
   terminating NUL into the newest chunk of di's chunked string
   table, growing the chunk list when the current chunk can't hold
   it, and return a pointer to the stored copy.  Ownership stays
   with 'di'.  NOTE(review): lines are missing from this extract
   (declarations of 'space_needed'/'p', the 'else' of the chunk
   test, the return). */
204 UChar* ML_(addStr) ( struct _DebugInfo* di, UChar* str, Int len )
206 struct strchunk *chunk;
211 len = VG_(strlen)(str);
/* +1 for the terminating NUL. */
216 space_needed = 1 + len;
218 // Allocate a new strtab chunk if necessary
219 if (di->strchunks == NULL ||
220 (di->strchunks->strtab_used
221 + space_needed) > SEGINFO_STRCHUNKSIZE) {
/* New chunk goes at the head of the list, so the head is always
   the chunk currently being filled. */
222 chunk = ML_(dinfo_zalloc)("di.storage.addStr.1", sizeof(*chunk));
223 chunk->strtab_used = 0;
224 chunk->next = di->strchunks;
225 di->strchunks = chunk;
227 chunk = di->strchunks;
229 p = &chunk->strtab[chunk->strtab_used];
230 VG_(memcpy)(p, str, len);
231 chunk->strtab[chunk->strtab_used+len] = '\0';
232 chunk->strtab_used += space_needed;
238 /* Add a symbol to the symbol table.
/* Append *sym to di->symtab, doubling the table (geometric growth,
   initial capacity 500) when full.  Zero-sized symbols are dropped.
   NOTE(review): lines are missing from this extract (declarations
   of new_sz/new_tab/i, closing braces, the symtab_used++). */
240 void ML_(addSym) ( struct _DebugInfo* di, DiSym* sym )
245 /* Ignore zero-sized syms. */
246 if (sym->size == 0) return;
/* Grow-on-full: double capacity, copy old entries, free old table. */
248 if (di->symtab_used == di->symtab_size) {
249 new_sz = 2 * di->symtab_size;
250 if (new_sz == 0) new_sz = 500;
251 new_tab = ML_(dinfo_zalloc)( "di.storage.addSym.1",
252 new_sz * sizeof(DiSym) );
253 if (di->symtab != NULL) {
254 for (i = 0; i < di->symtab_used; i++)
255 new_tab[i] = di->symtab[i];
256 ML_(dinfo_free)(di->symtab);
258 di->symtab = new_tab;
259 di->symtab_size = new_sz;
262 di->symtab[di->symtab_used] = *sym;
264 vg_assert(di->symtab_used <= di->symtab_size);
268 /* Resize the symbol table to save memory.
/* Shrink di->symtab so capacity exactly equals the number of used
   entries, releasing the slack left by the doubling growth policy
   in ML_(addSym).  No-op when already exact.  NOTE(review): the
   declaration of 'new_tab' and the closing brace are among the
   lines missing from this extract. */
270 void ML_(shrinkSym)( struct _DebugInfo* di )
273 UInt new_sz = di->symtab_used;
274 if (new_sz == di->symtab_size) return;
276 new_tab = ML_(dinfo_zalloc)( "di.storage.shrinkSym",
277 new_sz * sizeof(DiSym) );
278 VG_(memcpy)(new_tab, di->symtab, new_sz * sizeof(DiSym));
280 ML_(dinfo_free)(di->symtab);
281 di->symtab = new_tab;
282 di->symtab_size = new_sz;
286 /* Add a location to the location table.
/* Append *loc to di->loctab, doubling the table (initial capacity
   500) when full -- same growth pattern as ML_(addSym).  Callers
   must have filtered zero-sized locations already (asserted here).
   NOTE(review): declarations of new_sz/new_tab/i, closing braces
   and the loctab_used++ are among the lines missing from this
   extract. */
288 static void addLoc ( struct _DebugInfo* di, DiLoc* loc )
293 /* Zero-sized locs should have been ignored earlier */
294 vg_assert(loc->size > 0);
296 if (di->loctab_used == di->loctab_size) {
297 new_sz = 2 * di->loctab_size;
298 if (new_sz == 0) new_sz = 500;
299 new_tab = ML_(dinfo_zalloc)( "di.storage.addLoc.1",
300 new_sz * sizeof(DiLoc) );
301 if (di->loctab != NULL) {
302 for (i = 0; i < di->loctab_used; i++)
303 new_tab[i] = di->loctab[i];
304 ML_(dinfo_free)(di->loctab);
306 di->loctab = new_tab;
307 di->loctab_size = new_sz;
310 di->loctab[di->loctab_used] = *loc;
312 vg_assert(di->loctab_used <= di->loctab_size);
316 /* Resize the lineinfo table to save memory.
/* Shrink di->loctab so capacity exactly equals the number of used
   entries -- the loctab counterpart of ML_(shrinkSym).  No-op when
   already exact.  NOTE(review): the declaration of 'new_tab' and
   the closing brace are among the lines missing from this
   extract. */
318 void ML_(shrinkLineInfo)( struct _DebugInfo* di )
321 UInt new_sz = di->loctab_used;
322 if (new_sz == di->loctab_size) return;
324 new_tab = ML_(dinfo_zalloc)( "di.storage.shrinkLineInfo",
325 new_sz * sizeof(DiLoc) );
326 VG_(memcpy)(new_tab, di->loctab, new_sz * sizeof(DiLoc));
328 ML_(dinfo_free)(di->loctab);
329 di->loctab = new_tab;
330 di->loctab_size = new_sz;
334 /* Top-level place to call to add a source-location mapping entry.
/* Validate and store one source-location mapping ('this'..'next'
   maps to filename:lineno).  Entries that are zero-sized, reversed,
   oversized, outside the r-x mapped area, or with implausible line
   numbers are clamped or dropped with a warning; survivors are
   packed into a DiLoc and passed to addLoc().  NOTE(review): this
   extract is missing many lines (several parameters, the 'this >
   next' test, the size-clamping assignments, loc.addr/loc.lineno
   setup, the final addLoc call), as shown by the discontinuous
   embedded line numbers. */
336 void ML_(addLineInfo) ( struct _DebugInfo* di,
338 UChar* dirname, /* NULL == directory is unknown */
342 Int entry /* only needed for debug printing */
345 static const Bool debug = False;
347 Int size = next - this;
349 /* Ignore zero-sized locs */
350 if (this == next) return;
353 VG_(printf)( " src %s %s line %d %#lx-%#lx\n",
354 dirname ? dirname : (UChar*)"(unknown)",
355 filename, lineno, this, next );
357 /* Maximum sanity checking. Some versions of GNU as do a shabby
358 * job with stabs entries; if anything looks suspicious, revert to
359 * a size of 1. This should catch the instruction of interest
360 * (since if using asm-level debug info, one instruction will
361 * correspond to one line, unlike with C-level debug info where
362 * multiple instructions can map to the one line), but avoid
363 * catching any other instructions bogusly. */
365 if (VG_(clo_verbosity) > 2) {
366 VG_(message)(Vg_DebugMsg,
367 "warning: line info addresses out of order "
368 "at entry %d: 0x%lx 0x%lx\n", entry, this, next);
373 if (size > MAX_LOC_SIZE) {
375 VG_(message)(Vg_DebugMsg,
376 "warning: line info address range too large "
377 "at entry %d: %d\n", entry, size);
381 /* Rule out ones which are completely outside the r-x mapped area.
382 See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
383 for background and rationale. */
384 vg_assert(di->have_rx_map && di->have_rw_map);
385 if (next-1 < di->rx_map_avma
386 || this >= di->rx_map_avma + di->rx_map_size ) {
388 VG_(message)(Vg_DebugMsg,
389 "warning: ignoring line info entry falling "
390 "outside current DebugInfo: %#lx %#lx %#lx %#lx\n",
392 di->text_avma + di->text_size,
/* Reject negative / absurdly large line numbers; the MAX_LINENO
   complaint is printed only once per run ('complained' latch). */
397 vg_assert(lineno >= 0);
398 if (lineno > MAX_LINENO) {
399 static Bool complained = False;
402 VG_(message)(Vg_UserMsg,
403 "warning: ignoring line info entry with "
404 "huge line number (%d)\n", lineno);
405 VG_(message)(Vg_UserMsg,
406 " Can't handle line numbers "
407 "greater than %d, sorry\n", MAX_LINENO);
408 VG_(message)(Vg_UserMsg,
409 "(Nb: this message is only shown once)\n");
/* Build the DiLoc record and hand it to addLoc(). */
415 loc.size = (UShort)size;
417 loc.filename = filename;
418 loc.dirname = dirname;
420 if (0) VG_(message)(Vg_DebugMsg,
421 "addLoc: addr %#lx, size %d, line %d, file %s\n",
422 this,size,lineno,filename);
428 /* Top-level place to call to add a CFI summary record. The supplied
/* Validate, clip and store one CFI summary record.  Records wholly
   outside the r-x mapped area are dropped (with up to 10
   complaints); records straddling a boundary are truncated so they
   lie entirely within it, then appended to di->cfsi with the usual
   doubling growth (initial capacity 20).  NOTE(review): this
   extract is missing lines (declarations of new_sz/new_tab/i/delta,
   'return' statements, the truncation assignments after each delta
   computation, the cfsi_used++), per the discontinuous embedded
   line numbers. */
430 void ML_(addDiCfSI) ( struct _DebugInfo* di, DiCfSI* cfsi_orig )
432 static const Bool debug = False;
437 /* copy the original, so we can mess with it */
438 DiCfSI cfsi = *cfsi_orig;
441 VG_(printf)("adding DiCfSI: ");
442 ML_(ppDiCfSI)(di->cfsi_exprs, &cfsi);
446 vg_assert(cfsi.len > 0);
447 /* If this fails, the implication is you have a single procedure
448 with more than 5 million bytes of code. Which is pretty
449 unlikely. Either that, or the debuginfo reader is somehow
450 broken. 5 million is of course arbitrary; but it's big enough
451 to be bigger than the size of any plausible piece of code that
452 would fall within a single procedure. */
453 vg_assert(cfsi.len < 5000000);
455 vg_assert(di->have_rx_map && di->have_rw_map);
456 /* If we have an empty r-x mapping (is that possible?) then the
457 DiCfSI can't possibly fall inside it. In which case skip. */
458 if (di->rx_map_size == 0)
461 /* Rule out ones which are completely outside the r-x mapped area.
462 See "Comment_Regarding_Text_Range_Checks" elsewhere in this file
463 for background and rationale. */
464 if (cfsi.base + cfsi.len - 1 < di->rx_map_avma
465 || cfsi.base >= di->rx_map_avma + di->rx_map_size) {
/* Rate-limited complaint: at most 10, unless CFI tracing is on. */
466 static Int complaints = 10;
467 if (VG_(clo_trace_cfi) || complaints > 0) {
469 if (VG_(clo_verbosity) > 1) {
472 "warning: DiCfSI %#lx .. %#lx outside segment %#lx .. %#lx\n",
474 cfsi.base + cfsi.len - 1,
476 di->text_avma + di->text_size - 1
479 if (VG_(clo_trace_cfi))
480 ML_(ppDiCfSI)(di->cfsi_exprs, &cfsi);
485 /* Now we know the range is at least partially inside the r-x
486 mapped area. That implies that at least one of the ends of the
487 range falls inside the area. If necessary, clip it so it is
488 completely within the area. If we don't do this,
489 check_CFSI_related_invariants() in debuginfo.c (invariant #2)
491 "Comment_on_IMPORTANT_CFSI_REPRESENTATIONAL_INVARIANTS" in
492 priv_storage.h for background. */
493 if (cfsi.base < di->rx_map_avma) {
494 /* Lower end is outside the mapped area. Hence upper end must
496 if (0) VG_(printf)("XXX truncate lower\n");
497 vg_assert(cfsi.base + cfsi.len - 1 >= di->rx_map_avma);
/* delta = number of bytes hanging off the low end; the (missing)
   follow-up presumably advances cfsi.base and shrinks cfsi.len by
   delta -- TODO confirm against the full file. */
498 delta = (SSizeT)(di->rx_map_avma - cfsi.base);
499 vg_assert(delta > 0);
500 vg_assert(delta < (SSizeT)cfsi.len);
505 if (cfsi.base + cfsi.len - 1 > di->rx_map_avma + di->rx_map_size - 1) {
506 /* Upper end is outside the mapped area. Hence lower end must be
508 if (0) VG_(printf)("XXX truncate upper\n");
509 vg_assert(cfsi.base <= di->rx_map_avma + di->rx_map_size - 1);
510 delta = (SSizeT)( (cfsi.base + cfsi.len - 1)
511 - (di->rx_map_avma + di->rx_map_size - 1) );
512 vg_assert(delta > 0); vg_assert(delta < (SSizeT)cfsi.len);
518 /* Because: either cfsi was entirely inside the range, in which
519 case we asserted that len > 0 at the start, OR it fell partially
520 inside the range, in which case we reduced it by some size
521 (delta) which is < its original size. */
522 vg_assert(cfsi.len > 0);
524 /* Similar logic applies for the next two assertions. */
525 vg_assert(cfsi.base >= di->rx_map_avma);
526 vg_assert(cfsi.base + cfsi.len - 1
527 <= di->rx_map_avma + di->rx_map_size - 1);
/* Append, doubling the table when full (initial capacity 20). */
529 if (di->cfsi_used == di->cfsi_size) {
530 new_sz = 2 * di->cfsi_size;
531 if (new_sz == 0) new_sz = 20;
532 new_tab = ML_(dinfo_zalloc)( "di.storage.addDiCfSI.1",
533 new_sz * sizeof(DiCfSI) );
534 if (di->cfsi != NULL) {
535 for (i = 0; i < di->cfsi_used; i++)
536 new_tab[i] = di->cfsi[i];
537 ML_(dinfo_free)(di->cfsi);
540 di->cfsi_size = new_sz;
543 di->cfsi[di->cfsi_used] = cfsi;
545 vg_assert(di->cfsi_used <= di->cfsi_size);
549 Int ML_(CfiExpr_Undef)( XArray* dst )
552 VG_(memset)( &e, 0, sizeof(e) );
554 return (Int)VG_(addToXA)( dst, &e );
556 Int ML_(CfiExpr_Deref)( XArray* dst, Int ixAddr )
559 VG_(memset)( &e, 0, sizeof(e) );
561 e.Cex.Deref.ixAddr = ixAddr;
562 return (Int)VG_(addToXA)( dst, &e );
564 Int ML_(CfiExpr_Const)( XArray* dst, UWord con )
567 VG_(memset)( &e, 0, sizeof(e) );
569 e.Cex.Const.con = con;
570 return (Int)VG_(addToXA)( dst, &e );
572 Int ML_(CfiExpr_Binop)( XArray* dst, CfiOp op, Int ixL, Int ixR )
575 VG_(memset)( &e, 0, sizeof(e) );
578 e.Cex.Binop.ixL = ixL;
579 e.Cex.Binop.ixR = ixR;
580 return (Int)VG_(addToXA)( dst, &e );
582 Int ML_(CfiExpr_CfiReg)( XArray* dst, CfiReg reg )
585 VG_(memset)( &e, 0, sizeof(e) );
587 e.Cex.CfiReg.reg = reg;
588 return (Int)VG_(addToXA)( dst, &e );
590 Int ML_(CfiExpr_DwReg)( XArray* dst, Int reg )
593 VG_(memset)( &e, 0, sizeof(e) );
595 e.Cex.DwReg.reg = reg;
596 return (Int)VG_(addToXA)( dst, &e );
599 static void ppCfiOp ( CfiOp op )
602 case Cop_Add: VG_(printf)("+"); break;
603 case Cop_Sub: VG_(printf)("-"); break;
604 case Cop_And: VG_(printf)("&"); break;
605 case Cop_Mul: VG_(printf)("*"); break;
606 default: vg_assert(0);
610 static void ppCfiReg ( CfiReg reg )
613 case Creg_IA_SP: VG_(printf)("xSP"); break;
614 case Creg_IA_BP: VG_(printf)("xBP"); break;
615 case Creg_IA_IP: VG_(printf)("xIP"); break;
616 case Creg_ARM_R13: VG_(printf)("R13"); break;
617 case Creg_ARM_R12: VG_(printf)("R12"); break;
618 case Creg_ARM_R15: VG_(printf)("R15"); break;
619 case Creg_ARM_R14: VG_(printf)("R14"); break;
620 default: vg_assert(0);
/* Recursively pretty-print the CfiExpr at index 'ix' of 'src':
   leaves (Undef / Const / CfiReg / DwReg) print directly, Deref and
   Binop recurse on their operand indices; an unknown tag panics.
   NOTE(review): this extract is missing lines (the switch skeleton,
   case labels, and the framing printf calls around Deref/Binop),
   per the discontinuous embedded line numbers. */
624 void ML_(ppCfiExpr)( XArray* src, Int ix )
626 /* VG_(indexXA) checks for invalid src/ix values, so we can
627 use it indiscriminately. */
628 CfiExpr* e = (CfiExpr*) VG_(indexXA)( src, ix );
631 VG_(printf)("Undef");
635 ML_(ppCfiExpr)(src, e->Cex.Deref.ixAddr);
639 VG_(printf)("0x%lx", e->Cex.Const.con);
643 ML_(ppCfiExpr)(src, e->Cex.Binop.ixL);
645 ppCfiOp(e->Cex.Binop.op);
647 ML_(ppCfiExpr)(src, e->Cex.Binop.ixR);
651 ppCfiReg(e->Cex.CfiReg.reg);
654 VG_(printf)("dwr%d", e->Cex.DwReg.reg);
657 VG_(core_panic)("ML_(ppCfiExpr)");
664 Word ML_(cmp_for_DiAddrRange_range) ( const void* keyV,
665 const void* elemV ) {
666 const Addr* key = (const Addr*)keyV;
667 const DiAddrRange* elem = (const DiAddrRange*)elemV;
669 VG_(printf)("cmp_for_DiAddrRange_range: %#lx vs %#lx\n",
671 if ((*key) < elem->aMin) return -1;
672 if ((*key) > elem->aMax) return 1;
/* Debug helper: dump every DiAddrRange in 'scope' (its span and how
   many variables it holds), labelled with 'who'.  NOTE(review):
   this extract is missing lines (the DiAddrRange* declaration, the
   iteration loop around OSetGen_Next, the closing print), per the
   discontinuous embedded line numbers. */
677 void show_scope ( OSet* /* of DiAddrRange */ scope, HChar* who )
680 VG_(printf)("Scope \"%s\" = {\n", who);
681 VG_(OSetGen_ResetIter)( scope );
683 range = VG_(OSetGen_Next)( scope );
685 VG_(printf)(" %#lx .. %#lx: %lu vars\n", range->aMin, range->aMax,
686 range->vars ? VG_(sizeXA)(range->vars) : 0);
691 /* Add the variable 'var' to 'scope' for the address range [aMin,aMax]
692 (inclusive of aMin and aMax). Split existing ranges as required if
693 aMin or aMax or both don't match existing range boundaries, and add
694 'var' to all required ranges. Take great care to preserve the
695 invariant that the ranges in 'scope' cover the entire address range
696 exactly once, with no overlaps and no holes. */
/* Implementation notes: (1) split the range containing aMin so aMin
   starts a range; (2) split the range containing aMax so aMax ends
   a range; (3) walk all ranges from aMin to aMax adding 'var' to
   each.  The xx* variables and xxIters only feed assertions.
   NOTE(review): this extract is missing many lines (remaining
   parameters aMin/aMax/var, 'nyu' and 'xxIters' declarations,
   'return' in the fast path, the nyu->aMin/aMax assignments after
   each split, the iteration loop skeleton), per the discontinuous
   embedded line numbers -- treat the visible text as a partial
   view. */
697 static void add_var_to_arange (
698 /*MOD*/OSet* /* of DiAddrRange */ scope,
704 DiAddrRange *first, *last, *range;
705 /* These xx variables are for assertion checking only; they don't
706 contribute anything to the actual work of this function. */
707 DiAddrRange *xxRangep, *xxFirst, *xxLast;
710 vg_assert(aMin <= aMax);
712 if (0) VG_(printf)("add_var_to_arange: %#lx .. %#lx\n", aMin, aMax);
713 if (0) show_scope( scope, "add_var_to_arange(1)" );
715 /* See if the lower end of the range (aMin) falls exactly on an
716 existing range boundary. If not, find the range it does fall
717 into, and split it (copying the variables in the process), so
718 that aMin does exactly fall on a range boundary. */
719 first = VG_(OSetGen_Lookup)( scope, &aMin );
720 /* It must be present, since the presented OSet must cover
721 the entire address range. */
723 vg_assert(first->aMin <= first->aMax);
724 vg_assert(first->aMin <= aMin && aMin <= first->aMax);
726 /* Fast track common case, which is that the range specified for
727 the variable exactly coincides with one already-existing
729 if (first->aMin == aMin && first->aMax == aMax) {
730 vg_assert(first->vars);
731 VG_(addToXA)( first->vars, var );
735 /* We have to get into splitting ranges, which is complex
737 if (first->aMin < aMin) {
739 /* Ok. We'll have to split 'first'. */
740 /* truncate the upper end of 'first' */
741 Addr tmp = first->aMax;
742 first->aMax = aMin-1;
743 vg_assert(first->aMin <= first->aMax);
744 /* create a new range */
745 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
749 vg_assert(nyu->aMin <= nyu->aMax);
750 /* copy vars into it */
751 vg_assert(first->vars);
752 nyu->vars = VG_(cloneXA)( "di.storage.avta.1", first->vars );
753 vg_assert(nyu->vars);
754 VG_(OSetGen_Insert)( scope, nyu );
758 vg_assert(first->aMin == aMin);
760 /* Now do exactly the same for the upper end (aMax): if it doesn't
761 fall on a boundary, cause it to do so by splitting the range it
762 does currently fall into. */
763 last = VG_(OSetGen_Lookup)( scope, &aMax );
764 vg_assert(last->aMin <= last->aMax);
765 vg_assert(last->aMin <= aMax && aMax <= last->aMax);
767 if (aMax < last->aMax) {
769 /* We have to split 'last'. */
770 /* truncate the lower end of 'last' */
771 Addr tmp = last->aMin;
773 vg_assert(last->aMin <= last->aMax);
774 /* create a new range */
775 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
779 vg_assert(nyu->aMin <= nyu->aMax);
780 /* copy vars into it */
781 vg_assert(last->vars);
782 nyu->vars = VG_(cloneXA)( "di.storage.avta.2", last->vars );
783 vg_assert(nyu->vars);
784 VG_(OSetGen_Insert)( scope, nyu );
788 vg_assert(aMax == last->aMax);
790 xxFirst = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMin);
791 xxLast = (DiAddrRange*)VG_(OSetGen_Lookup)(scope, &aMax);
794 vg_assert(xxFirst->aMin == aMin);
795 vg_assert(xxLast->aMax == aMax);
796 if (xxFirst != xxLast)
797 vg_assert(xxFirst->aMax < xxLast->aMin);
799 /* Great. Now we merely need to iterate over the segments from
800 'first' to 'last' inclusive, and add 'var' to the variable set
/* 'ctr' is a debugging aid: after very many insertions, dump the
   scope to spot pathological splitting. */
803 static UWord ctr = 0;
805 VG_(printf)("ctr = %lu\n", ctr);
806 if (ctr >= 33263) show_scope( scope, "add_var_to_arange(2)" );
810 range = xxRangep = NULL;
811 VG_(OSetGen_ResetIterAt)( scope, &aMin );
814 range = VG_(OSetGen_Next)( scope );
816 if (range->aMin > aMax) break;
818 if (0) VG_(printf)("have range %#lx %#lx\n",
819 range->aMin, range->aMax);
823 /* This is the first in the range */
824 vg_assert(range->aMin == aMin);
/* Consecutive ranges must abut exactly: no holes, no overlap. */
826 vg_assert(xxRangep->aMax + 1 == range->aMin);
829 vg_assert(range->vars);
830 VG_(addToXA)( range->vars, var );
832 /* Done. We should have seen at least one range. */
833 vg_assert(xxIters >= 1);
834 if (xxIters == 1) vg_assert(xxFirst == xxLast);
835 if (xxFirst == xxLast) vg_assert(xxIters == 1);
837 vg_assert(xxRangep->aMax == aMax);
838 vg_assert(xxRangep == xxLast);
842 /* Top-level place to call to add a variable description (as extracted
843 from a DWARF3 .debug_info section. */
/* Record one DWARF3 variable: validate its type and text range,
   lazily create di->varinfo and the per-nesting-level scope OSets
   (each seeded with a single range covering the whole address
   space), then hand the assembled DiVariable to
   add_var_to_arange().  NOTE(review): this extract is missing many
   lines (several parameters such as level/aMin/aMax/gexpr, local
   declarations 'ent'/'mul'/'var'/'nyu'/'all'/'badness', various
   returns and closing braces, most var.* field assignments), per
   the discontinuous embedded line numbers. */
844 void ML_(addVar)( struct _DebugInfo* di,
848 UChar* name, /* in di's .strchunks */
849 UWord typeR, /* a cuOff */
852 UChar* fileName, /* where decl'd - may be NULL.
853 in di's .strchunks */
854 Int lineNo, /* where decl'd - may be zero */
857 OSet* /* of DiAddrRange */ scope;
864 tl_assert(di && di->admin_tyents);
867 VG_(printf)(" ML_(addVar): level %d %#lx-%#lx %s :: ",
868 level, aMin, aMax, name );
869 ML_(pp_TyEnt_C_ishly)( di->admin_tyents, typeR );
870 VG_(printf)("\n Var=");
874 VG_(printf)(" FrB=");
878 VG_(printf)(" FrB=none\n");
883 vg_assert(level >= 0);
884 vg_assert(aMin <= aMax);
/* The variable's type must resolve to an actual type entry. */
888 ent = ML_(TyEnts__index_by_cuOff)( di->admin_tyents, NULL, typeR);
890 vg_assert(ML_(TyEnt__is_type)(ent));
892 /* "Comment_Regarding_Text_Range_Checks" (is referred to elsewhere)
893 ----------------------------------------------------------------
894 Ignore any variables whose aMin .. aMax (that is, range of text
895 addresses for which they actually exist) falls outside the text
896 segment. Is this indicative of a bug in the reader? Maybe.
897 (LATER): instead of restricting strictly to the .text segment,
898 be a bit more relaxed, and accept any variable whose text range
899 falls inside the r-x mapped area. This is useful because .text
900 is not always the only instruction-carrying segment: others are:
901 .init .plt __libc_freeres_fn and .fini. This implicitly assumes
902 that those extra sections have the same bias as .text, but that
903 seems a reasonable assumption to me. */
904 /* This is assured us by top level steering logic in debuginfo.c,
905 and it is re-checked at the start of
906 ML_(read_elf_debug_info). */
907 vg_assert(di->have_rx_map && di->have_rw_map);
909 && (aMax < di->rx_map_avma
910 || aMin >= di->rx_map_avma + di->rx_map_size)) {
/* '>= 0' is always true for clo_verbosity here, so this warning is
   effectively unconditional in this branch. */
911 if (VG_(clo_verbosity) >= 0) {
912 VG_(message)(Vg_DebugMsg,
913 "warning: addVar: in range %#lx .. %#lx outside "
914 "segment %#lx .. %#lx (%s)\n",
916 di->text_avma, di->text_avma + di->text_size -1,
923 /* If the type's size is zero (which can mean unknown size), ignore
924 it. We will never be able to actually relate a data address to
925 a data object with zero size, so there's no point in storing
926 info on it. On 32-bit platforms, also reject types whose size
927 is 2^32 bytes or large. (It's amazing what junk shows up ..) */
928 mul = ML_(sizeOfType)(di->admin_tyents, typeR);
932 badness = "unknown size";
933 else if (mul.ul == 0)
934 badness = "zero size ";
935 else if (sizeof(void*) == 4 && mul.ul >= (1ULL<<32))
936 badness = "implausibly large";
/* Rate-limited (10 max) complaint about rejected types. */
939 static Int complaints = 10;
940 if (VG_(clo_verbosity) >= 2 && complaints > 0) {
941 VG_(message)(Vg_DebugMsg, "warning: addVar: %s (%s)\n",
/* Lazily create the top-level per-level array of scopes. */
949 di->varinfo = VG_(newXA)( ML_(dinfo_zalloc),
950 "di.storage.addVar.1",
955 vg_assert(level < 256); /* arbitrary; stay sane */
956 /* Expand the top level array enough to map this level */
957 while ( VG_(sizeXA)(di->varinfo) <= level ) {
959 scope = VG_(OSetGen_Create)( offsetof(DiAddrRange,aMin),
960 ML_(cmp_for_DiAddrRange_range),
961 ML_(dinfo_zalloc), "di.storage.addVar.2",
964 if (0) VG_(printf)("create: scope = %p, adding at %ld\n",
965 scope, VG_(sizeXA)(di->varinfo));
966 VG_(addToXA)( di->varinfo, &scope );
967 /* Add a single range covering the entire address space. At
968 level 0 we require this doesn't get split. At levels above 0
969 we require that any additions to it cause it to get split.
970 All of these invariants get checked both add_var_to_arange
971 and after reading is complete, in canonicaliseVarInfo. */
972 nyu = VG_(OSetGen_AllocNode)( scope, sizeof(DiAddrRange) );
975 nyu->aMax = ~(Addr)0;
976 nyu->vars = VG_(newXA)( ML_(dinfo_zalloc), "di.storage.addVar.3",
978 sizeof(DiVariable) );
979 vg_assert(nyu->vars);
980 VG_(OSetGen_Insert)( scope, nyu );
983 vg_assert( VG_(sizeXA)(di->varinfo) > level );
984 scope = *(OSet**)VG_(indexXA)( di->varinfo, level );
991 var.fileName = fileName;
/* Global (level 0) vars must span the whole address space; nested
   ones must not. */
994 all = aMin == (Addr)0 && aMax == ~(Addr)0;
995 vg_assert(level == 0 ? all : !all);
997 add_var_to_arange( /*MOD*/scope, aMin, aMax, &var );
1001 /* This really just checks the constructed data structure, as there is
1002 no canonicalisation to do. */
/* Sanity-check the per-level variable scopes after reading: level 0
   must be a single all-covering range; local scopes must tile the
   address space with abutting ranges, and a single-range local
   scope must hold no variables.  (Pure assertion pass -- nothing is
   modified.)  NOTE(review): this extract is missing lines (the
   'zero' declaration, the i==0 test, the iteration-loop skeleton,
   'continue's and closing braces), per the discontinuous embedded
   line numbers. */
1003 static void canonicaliseVarInfo ( struct _DebugInfo* di )
1005 Word i, nInThisScope;
1010 for (i = 0; i < VG_(sizeXA)(di->varinfo); i++) {
1012 DiAddrRange *range, *rangep;
1013 OSet* scope = *(OSet**)VG_(indexXA)(di->varinfo, i);
1014 if (!scope) continue;
1016 /* Deal with the global-scope case. */
1019 vg_assert(VG_(OSetGen_Size)( scope ) == 1);
1020 range = VG_(OSetGen_Lookup)( scope, &zero );
1022 vg_assert(range->aMin == (Addr)0);
1023 vg_assert(range->aMax == ~(Addr)0);
1027 /* All the rest of this is for the local-scope case. */
1028 /* iterate over all entries in 'scope' */
1031 VG_(OSetGen_ResetIter)(scope);
1033 range = VG_(OSetGen_Next)(scope);
1035 /* We just saw the last one. There must have been at
1036 least one entry in the range. */
1038 vg_assert(rangep->aMax == ~(Addr)0);
1042 vg_assert(range->aMin <= range->aMax);
1043 vg_assert(range->vars);
1046 /* This is the first entry in the range. */
1047 vg_assert(range->aMin == 0);
/* Adjacent ranges must abut exactly: no holes, no overlap. */
1049 vg_assert(rangep->aMax + 1 == range->aMin);
1054 } /* iterating over ranges in a given scope */
1056 /* If there's only one entry in this (local) scope, it must
1057 cover the entire address space (obviously), but it must not
1058 contain any vars. */
1060 vg_assert(nInThisScope > 0);
1061 if (nInThisScope == 1) {
1063 vg_assert(VG_(OSetGen_Size)( scope ) == 1);
1064 range = VG_(OSetGen_Lookup)( scope, &zero );
1066 vg_assert(range->aMin == (Addr)0);
1067 vg_assert(range->aMax == ~(Addr)0);
1068 vg_assert(range->vars);
1069 vg_assert(VG_(sizeXA)(range->vars) == 0);
1072 } /* iterate over scopes */
1076 /*------------------------------------------------------------*/
1077 /*--- Canonicalisers ---*/
1078 /*------------------------------------------------------------*/
1080 /* Sort the symtab by starting address, and emit warnings if any
1081 symbols have overlapping address ranges. We use that old chestnut,
1082 shellsort. Mash the table around so as to establish the property
1083 that addresses are in order and the ranges to not overlap. This
1084 facilitates using binary search to map addresses to symbols when we
1085 come to query the table.
1087 static Int compare_DiSym ( void* va, void* vb )
1089 DiSym* a = (DiSym*)va;
1090 DiSym* b = (DiSym*)vb;
1091 if (a->addr < b->addr) return -1;
1092 if (a->addr > b->addr) return 1;
1097 /* Two symbols have the same address. Which name do we prefer? In order:
1099 - Prefer "PMPI_<foo>" over "MPI_<foo>".
1101 - Else, prefer a non-NULL name over a NULL one.
1103 - Else, prefer a non-whitespace name over an all-whitespace name.
1105 - Else, prefer the shorter symbol name. If the symbol contains a
1106 version symbol ('@' on Linux, other platforms may differ), which means it
1107 is versioned, then the length up to the version symbol is used for length
1108 comparison purposes (so "foo@GLIBC_2.4.2" is considered shorter than
1111 - Else, if two symbols have the same length, prefer a versioned symbol over
1112 a non-versioned symbol.
1114 - Else, use alphabetical ordering.
1116 - Otherwise, they must be the same; use the symbol with the lower address.
1118 Very occasionally this goes wrong (eg. 'memcmp' and 'bcmp' are
1119 aliases in glibc, we choose the 'bcmp' symbol because it's shorter,
1120 so we can misdescribe memcmp() as bcmp()). This is hard to avoid.
1121 It's mentioned in the FAQ file.
/* Choose which of two same-address symbols to keep, per the
   priority list in the comment above: PMPI_ over MPI_, non-empty
   over empty, non-blank over blank, shorter unversioned name,
   versioned over unversioned at equal length, then alphabetical;
   identical names fall back to pointer order.  NOTE(review): this
   extract is missing lines (the 'cmp' and blankA/blankB
   declarations, whitespace-scan loop skeletons, the vpa/vpb
   versioned-name tests, the 'out:' label and final returns), per
   the discontinuous embedded line numbers. */
1123 static DiSym* prefersym ( struct _DebugInfo* di, DiSym* a, DiSym* b )
1126 Word vlena, vlenb; /* length without version */
1127 const UChar *vpa, *vpb;
1129 Bool preferA = False;
1130 Bool preferB = False;
1132 vg_assert(a->addr == b->addr);
1134 vlena = VG_(strlen)(a->name);
1135 vlenb = VG_(strlen)(b->name);
/* Platform-specific separator introducing a symbol-version suffix. */
1137 #if defined(VGO_linux) || defined(VGO_aix5) || defined(VGO_l4re)
1138 # define VERSION_CHAR '@'
1139 #elif defined(VGO_darwin)
1140 # define VERSION_CHAR '$'
1145 vpa = VG_(strchr)(a->name, VERSION_CHAR);
1146 vpb = VG_(strchr)(b->name, VERSION_CHAR);
/* Clip the compared lengths at the version separator, so
   "foo@GLIBC_2.4.2" counts as length 3. */
1149 vlena = vpa - a->name;
1151 vlenb = vpb - b->name;
1153 /* MPI hack: prefer PMPI_Foo over MPI_Foo */
1154 if (0==VG_(strncmp)(a->name, "MPI_", 4)
1155 && 0==VG_(strncmp)(b->name, "PMPI_", 5)
1156 && 0==VG_(strcmp)(a->name, 1+b->name)) {
1157 preferB = True; goto out;
1159 if (0==VG_(strncmp)(b->name, "MPI_", 4)
1160 && 0==VG_(strncmp)(a->name, "PMPI_", 5)
1161 && 0==VG_(strcmp)(b->name, 1+a->name)) {
1162 preferA = True; goto out;
1165 /* Prefer non-empty name. */
1166 if (vlena && !vlenb) {
1167 preferA = True; goto out;
1169 if (vlenb && !vlena) {
1170 preferB = True; goto out;
1173 /* Prefer non-whitespace name. */
1180 if (!VG_(isspace)(*s++)) {
1187 if (!VG_(isspace)(*s++)) {
1193 if (!blankA && blankB) {
1194 preferA = True; goto out;
1196 if (!blankB && blankA) {
1197 preferB = True; goto out;
1201 /* Select the shortest unversioned name */
1202 if (vlena < vlenb) {
1203 preferA = True; goto out;
1205 if (vlenb < vlena) {
1206 preferB = True; goto out;
1209 /* Equal lengths; select the versioned name */
1211 preferA = True; goto out;
1214 preferB = True; goto out;
1217 /* Either both versioned or neither is versioned; select them
1219 cmp = VG_(strcmp)(a->name, b->name);
1221 preferA = True; goto out;
1224 preferB = True; goto out;
1227 /* If we get here, they are the same name. */
1229 /* In this case we could choose either (arbitrarily), but might as
1230 well choose the one with the lowest DiSym* address, so as to try
1231 and make the comparison mechanism more stable (a la sorting
1232 parlance). Also, skip the diagnostic printing in this case. */
1233 return a <= b ? a : b;
1238 if (preferA && !preferB) {
1239 TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
1240 a->addr, a->name, b->name );
1243 if (preferB && !preferA) {
1244 TRACE_SYMTAB("sym at %#lx: prefer '%s' to '%s'\n",
1245 b->addr, b->name, a->name );
/* Bring di->symtab into canonical form: sort it by start address,
   merge entries with identical address ranges (keeping whichever one
   prefersym() picks), truncate mutually overlapping ranges, and
   finally assert that the table is sorted, free of zero-sized
   entries, and non-overlapping.  After this, lookups can use binary
   search.
   NOTE(review): this extract is missing some interior lines; the
   comments below describe only the code that is visible here. */
1252 static void canonicaliseSymtab ( struct _DebugInfo* di )
1254 Word i, j, n_merged, n_truncated;
1255 Addr s1, s2, e1, e2, p1, p2;
1257 Bool t1, t2, f1, f2;
/* Exchange two values of type 'ty'. */
1259 # define SWAP(ty,aa,bb) \
1260 do { ty tt = (aa); (aa) = (bb); (bb) = tt; } while (0)
/* Empty table: nothing to canonicalise. */
1262 if (di->symtab_used == 0)
/* Sort by start address (see compare_DiSym, defined earlier). */
1265 VG_(ssort)(di->symtab, di->symtab_used,
1266 sizeof(*di->symtab), compare_DiSym);
1270 /* If two symbols have identical address ranges, we pick one
1271 using prefersym() (see it for details). */
/* In-place compaction: 'j' holds the old entry count while
   symtab_used is re-counted as entries are kept or merged. */
1274 j = di->symtab_used;
1275 di->symtab_used = 0;
1276 for (i = 0; i < j; i++) {
1278 && di->symtab[i].addr == di->symtab[i+1].addr
1279 && di->symtab[i].size == di->symtab[i+1].size
1282 /* merge the two into one */
1283 di->symtab[di->symtab_used++]
1284 = *prefersym(di, &di->symtab[i], &di->symtab[i+1]);
/* Non-duplicate entry: keep it unchanged. */
1287 di->symtab[di->symtab_used++] = di->symtab[i];
1290 TRACE_SYMTAB( "canonicaliseSymtab: %ld symbols merged\n", n_merged);
/* Repeat the merge pass until it makes no further progress. */
1292 while (n_merged > 0);
1294 /* Detect and "fix" overlapping address ranges. */
1297 for (i = 0; i < ((Word)di->symtab_used) -1; i++) {
/* Sortedness must hold for the pairwise overlap check below. */
1299 vg_assert(di->symtab[i].addr <= di->symtab[i+1].addr);
1301 /* Check for common (no overlap) case. */
1302 if (di->symtab[i].addr + di->symtab[i].size
1303 <= di->symtab[i+1].addr)
1306 /* There's an overlap. Truncate one or the other. */
1307 if (di->trace_symtab) {
1308 VG_(printf)("overlapping address ranges in symbol table\n\t");
1309 ML_(ppSym)( i, &di->symtab[i] );
1311 ML_(ppSym)( i+1, &di->symtab[i+1] );
1315 /* Truncate one or the other. */
/* Unpack both entries: s=start, e=end (inclusive), p=tocptr,
   n=name, t=isText, f=isIFunc. */
1316 s1 = di->symtab[i].addr;
1317 e1 = s1 + di->symtab[i].size - 1;
1318 p1 = di->symtab[i].tocptr;
1319 n1 = di->symtab[i].name;
1320 t1 = di->symtab[i].isText;
1321 f1 = di->symtab[i].isIFunc;
1322 s2 = di->symtab[i+1].addr;
1323 e2 = s2 + di->symtab[i+1].size - 1;
1324 p2 = di->symtab[i+1].tocptr;
1325 n2 = di->symtab[i+1].name;
1326 t2 = di->symtab[i+1].isText;
1327 f2 = di->symtab[i+1].isIFunc;
1331 vg_assert(s1 == s2);
/* NOTE(review): this looks like the equal-start case with range 1
   longer than range 2 — range 1 is moved to start just past range
   2's end, and the two are swapped so the earlier one stays at
   index i.  Confirm against the missing enclosing conditionals. */
1333 s1 = e2+1; SWAP(Addr,s1,s2); SWAP(Addr,e1,e2); SWAP(Addr,p1,p2);
1334 SWAP(UChar *,n1,n2); SWAP(Bool,t1,t2);
1339 /* e1 == e2. Identical addr ranges. We'll eventually wind
1340 up back at cleanup_more, which will take care of it. */
/* Write the (possibly truncated/swapped) fields back. */
1343 di->symtab[i].addr = s1;
1344 di->symtab[i].size = e1 - s1 + 1;
1345 di->symtab[i].tocptr = p1;
1346 di->symtab[i].name = n1;
1347 di->symtab[i].isText = t1;
1348 di->symtab[i].isIFunc = f1;
1349 di->symtab[i+1].addr = s2;
1350 di->symtab[i+1].size = e2 - s2 + 1;
1351 di->symtab[i+1].tocptr = p2;
1352 di->symtab[i+1].name = n2;
1353 di->symtab[i+1].isText = t2;
1354 di->symtab[i+1].isIFunc = f2;
1355 vg_assert(s1 <= s2);
1356 vg_assert(di->symtab[i].size > 0);
1357 vg_assert(di->symtab[i+1].size > 0);
1358 /* It may be that the i+1 entry now needs to be moved further
1359 along to maintain the address order requirement. */
/* Bubble the moved entry rightwards until order is restored. */
1361 while (j < ((Word)di->symtab_used)-1
1362 && di->symtab[j].addr > di->symtab[j+1].addr) {
1363 SWAP(DiSym,di->symtab[j],di->symtab[j+1]);
/* Truncation may have created new identical ranges; re-run the
   merge pass in that case. */
1369 if (n_truncated > 0) goto cleanup_more;
1371 /* Ensure relevant postconditions hold. */
1372 for (i = 0; i < ((Word)di->symtab_used)-1; i++) {
1373 /* No zero-sized symbols. */
1374 vg_assert(di->symtab[i].size > 0);
/* Strictly ascending start addresses... */
1376 vg_assert(di->symtab[i].addr < di->symtab[i+1].addr);
/* ...and no overlap between adjacent ranges. */
1378 vg_assert(di->symtab[i].addr + di->symtab[i].size - 1
1379 < di->symtab[i+1].addr);
1385 /* Sort the location table by starting address. Mash the table around
1386 so as to establish the property that addresses are in order and the
1387 ranges do not overlap. This facilitates using binary search to map
1388 addresses to locations when we come to query the table.
1390 static Int compare_DiLoc ( void* va, void* vb )
1392 DiLoc* a = (DiLoc*)va;
1393 DiLoc* b = (DiLoc*)vb;
1394 if (a->addr < b->addr) return -1;
1395 if (a->addr > b->addr) return 1;
/* Bring di->loctab (address -> source file/line info) into canonical
   form: sort by start address, truncate entries that overlap their
   successor, discard the resulting zero-sized entries, and assert
   that the final table is sorted and non-overlapping so binary
   search can be used on it.
   NOTE(review): this extract is missing some interior lines; the
   comments below describe only the code that is visible here. */
1399 static void canonicaliseLoctab ( struct _DebugInfo* di )
/* Exchange two values of type 'ty'. */
1403 # define SWAP(ty,aa,bb) \
1404 do { ty tt = (aa); (aa) = (bb); (bb) = tt; } while (0);
/* Empty table: nothing to canonicalise. */
1406 if (di->loctab_used == 0)
1409 /* Sort by start address. */
1410 VG_(ssort)(di->loctab, di->loctab_used,
1411 sizeof(*di->loctab), compare_DiLoc);
1413 /* If two adjacent entries overlap, truncate the first. */
1414 for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
/* Sanity cap on entry size (sizes are stored in a small field). */
1415 vg_assert(di->loctab[i].size < 10000);
1416 if (di->loctab[i].addr + di->loctab[i].size > di->loctab[i+1].addr) {
1417 /* Do this in signed int32 because the actual .size fields
1418 are only 12 bits. */
1419 Int new_size = di->loctab[i+1].addr - di->loctab[i].addr;
1421 di->loctab[i].size = 0;
/* Clamp to the representable maximum if truncation still leaves
   the entry too large for the size field. */
1423 if (new_size > MAX_LOC_SIZE) {
1424 di->loctab[i].size = MAX_LOC_SIZE;
1426 di->loctab[i].size = (UShort)new_size;
1431 /* Zap any zero-sized entries resulting from the truncation
/* Compact surviving (size > 0) entries to the front; 'j' counts
   how many are kept. */
1434 for (i = 0; i < (Word)di->loctab_used; i++) {
1435 if (di->loctab[i].size > 0) {
1437 di->loctab[j] = di->loctab[i];
1441 di->loctab_used = j;
1443 /* Ensure relevant postconditions hold. */
1444 for (i = 0; i < ((Word)di->loctab_used)-1; i++) {
/* Diagnostic dump of the entry that breaks an assertion below. */
1446 VG_(printf)("%d (%d) %d 0x%x\n",
1447 i, di->loctab[i+1].confident,
1448 di->loctab[i+1].size, di->loctab[i+1].addr );
1450 /* No zero-sized symbols. */
1451 vg_assert(di->loctab[i].size > 0);
/* Strictly ascending addresses, and no overlap with successor. */
1453 vg_assert(di->loctab[i].addr < di->loctab[i+1].addr);
1455 vg_assert(di->loctab[i].addr + di->loctab[i].size - 1
1456 < di->loctab[i+1].addr);
1462 /* Sort the call-frame-info table by starting address. Mash the table
1463 around so as to establish the property that addresses are in order
1464 and the ranges do not overlap. This facilitates using binary
1465 search to map addresses to locations when we come to query the
1468 Also, set cfisi_minaddr and cfisi_maxaddr to be the min and max of
1469 any of the address ranges contained in cfisi[0 .. cfisi_used-1], so
1470 as to facilitate rapidly skipping this SegInfo when looking for an
1471 address which falls outside that range.
1473 static Int compare_DiCfSI ( void* va, void* vb )
1475 DiCfSI* a = (DiCfSI*)va;
1476 DiCfSI* b = (DiCfSI*)vb;
1477 if (a->base < b->base) return -1;
1478 if (a->base > b->base) return 1;
/* Bring di->cfsi (call-frame info) into canonical form: compute the
   summary range [cfsi_minavma, cfsi_maxavma] covering every entry,
   sort by base address, truncate overlapping entries, drop any that
   become zero-length, and assert sortedness / disjointness.  The
   summary range lets lookups skip this DebugInfo entirely when the
   queried address falls outside it.
   NOTE(review): this extract is missing some interior lines; the
   comments below describe only the code that is visible here. */
1482 void ML_(canonicaliseCFI) ( struct _DebugInfo* di )
/* Extreme Addr values (0 and all-ones) used to seed the min/max
   scan below. */
1485 const Addr minAvma = 0;
1486 const Addr maxAvma = ~minAvma;
1488 /* Note: take care in here. di->cfsi can be NULL, in which
1489 case _used and _size fields will be zero. */
1490 if (di->cfsi == NULL) {
1491 vg_assert(di->cfsi_used == 0);
1492 vg_assert(di->cfsi_size == 0);
1495 /* Set cfsi_minavma and cfsi_maxavma to summarise the entire
1496 address range contained in cfsi[0 .. cfsi_used-1]. */
1497 di->cfsi_minavma = maxAvma;
1498 di->cfsi_maxavma = minAvma;
1499 for (i = 0; i < (Word)di->cfsi_used; i++) {
1500 Addr here_min = di->cfsi[i].base;
1501 Addr here_max = di->cfsi[i].base + di->cfsi[i].len - 1;
1502 if (here_min < di->cfsi_minavma)
1503 di->cfsi_minavma = here_min;
1504 if (here_max > di->cfsi_maxavma)
1505 di->cfsi_maxavma = here_max;
/* Trace the computed summary range, if tracing is enabled. */
1509 VG_(printf)("canonicaliseCfiSI: %ld entries, %#lx .. %#lx\n",
1511 di->cfsi_minavma, di->cfsi_maxavma);
1513 /* Sort the cfsi array by base address. */
1514 VG_(ssort)(di->cfsi, di->cfsi_used, sizeof(*di->cfsi), compare_DiCfSI);
1516 /* If two adjacent entries overlap, truncate the first. */
1517 for (i = 0; i < (Word)di->cfsi_used-1; i++) {
1518 if (di->cfsi[i].base + di->cfsi[i].len > di->cfsi[i+1].base) {
1519 Word new_len = di->cfsi[i+1].base - di->cfsi[i].base;
1520 /* how could it be otherwise? The entries are sorted by the
1522 vg_assert(new_len >= 0);
1523 vg_assert(new_len <= di->cfsi[i].len);
1524 di->cfsi[i].len = new_len;
1528 /* Zap any zero-sized entries resulting from the truncation
/* Compact surviving (len > 0) entries to the front of the array. */
1531 for (i = 0; i < (Word)di->cfsi_used; i++) {
1532 if (di->cfsi[i].len > 0) {
1534 di->cfsi[j] = di->cfsi[i];
1538 /* VG_(printf)("XXXXXXXXXXXXX %d %d\n", di->cfsi_used, j); */
1541 /* Ensure relevant postconditions hold. */
1542 for (i = 0; i < (Word)di->cfsi_used; i++) {
1543 /* No zero-length ranges. */
1544 vg_assert(di->cfsi[i].len > 0);
1545 /* Makes sense w.r.t. summary address range */
1546 vg_assert(di->cfsi[i].base >= di->cfsi_minavma);
1547 vg_assert(di->cfsi[i].base + di->cfsi[i].len - 1
1548 <= di->cfsi_maxavma);
1550 if (i < di->cfsi_used - 1) {
/* Dump the offending pair before the assertion fires, to aid
   debugging of bad CFI input. */
1552 if (!(di->cfsi[i].base < di->cfsi[i+1].base)) {
1553 VG_(printf)("\nOOO cfsis:\n");
1554 ML_(ppCfiSI)(&di->cfsi[i]);
1555 ML_(ppCfiSI)(&di->cfsi[i+1]);
/* Strictly ascending bases, and no overlap with the successor. */
1559 vg_assert(di->cfsi[i].base < di->cfsi[i+1].base);
1561 vg_assert(di->cfsi[i].base + di->cfsi[i].len - 1
1562 < di->cfsi[i+1].base);
1569 /* Canonicalise the tables held by 'di', in preparation for use. Call
1570 this after finishing adding entries to these tables. */
1571 void ML_(canonicaliseTables) ( struct _DebugInfo* di )
1573 canonicaliseSymtab ( di );
1574 canonicaliseLoctab ( di );
1575 ML_(canonicaliseCFI) ( di );
1576 canonicaliseVarInfo ( di );
1580 /*------------------------------------------------------------*/
1581 /*--- Searching the tables ---*/
1582 /*------------------------------------------------------------*/
1584 /* Find a symbol-table index containing the specified pointer, or -1
1585 if not found. Binary search. */
1587 Word ML_(search_one_symtab) ( struct _DebugInfo* di, Addr ptr,
1588 Bool match_anywhere_in_sym,
1591 Addr a_mid_lo, a_mid_hi;
1594 hi = di->symtab_used-1;
1596 /* current unsearched space is from lo to hi, inclusive. */
1597 if (lo > hi) return -1; /* not found */
1598 mid = (lo + hi) / 2;
1599 a_mid_lo = di->symtab[mid].addr;
1600 size = ( match_anywhere_in_sym
1601 ? di->symtab[mid].size
1603 a_mid_hi = ((Addr)di->symtab[mid].addr) + size - 1;
1605 if (ptr < a_mid_lo) { hi = mid-1; continue; }
1606 if (ptr > a_mid_hi) { lo = mid+1; continue; }
1607 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
1608 /* Found a symbol with the correct address range. But is it
1609 of the right kind (text vs data) ? */
1610 if ( findText && di->symtab[mid].isText ) return mid;
1611 if ( (!findText) && (!di->symtab[mid].isText) ) return mid;
1617 /* Find a location-table index containing the specified pointer, or -1
1618 if not found. Binary search. */
1620 Word ML_(search_one_loctab) ( struct _DebugInfo* di, Addr ptr )
1622 Addr a_mid_lo, a_mid_hi;
1625 hi = di->loctab_used-1;
1627 /* current unsearched space is from lo to hi, inclusive. */
1628 if (lo > hi) return -1; /* not found */
1629 mid = (lo + hi) / 2;
1630 a_mid_lo = di->loctab[mid].addr;
1631 a_mid_hi = ((Addr)di->loctab[mid].addr) + di->loctab[mid].size - 1;
1633 if (ptr < a_mid_lo) { hi = mid-1; continue; }
1634 if (ptr > a_mid_hi) { lo = mid+1; continue; }
1635 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
1641 /* Find a CFI-table index containing the specified pointer, or -1
1642 if not found. Binary search. */
1644 Word ML_(search_one_cfitab) ( struct _DebugInfo* di, Addr ptr )
1646 Addr a_mid_lo, a_mid_hi;
1649 hi = di->cfsi_used-1;
1651 /* current unsearched space is from lo to hi, inclusive. */
1652 if (lo > hi) return -1; /* not found */
1653 mid = (lo + hi) / 2;
1654 a_mid_lo = di->cfsi[mid].base;
1655 size = di->cfsi[mid].len;
1656 a_mid_hi = a_mid_lo + size - 1;
1657 vg_assert(a_mid_hi >= a_mid_lo);
1658 if (ptr < a_mid_lo) { hi = mid-1; continue; }
1659 if (ptr > a_mid_hi) { lo = mid+1; continue; }
1660 vg_assert(ptr >= a_mid_lo && ptr <= a_mid_hi);
1666 /* Find a FPO-table index containing the specified pointer, or -1
1667 if not found. Binary search. */
1669 Word ML_(search_one_fpotab) ( struct _DebugInfo* di, Addr ptr )
1671 Addr const addr = ptr - di->rx_map_avma;
1672 Addr a_mid_lo, a_mid_hi;
1675 hi = di->fpo_size-1;
1677 /* current unsearched space is from lo to hi, inclusive. */
1678 if (lo > hi) return -1; /* not found */
1679 mid = (lo + hi) / 2;
1680 a_mid_lo = di->fpo[mid].ulOffStart;
1681 size = di->fpo[mid].cbProcSize;
1682 a_mid_hi = a_mid_lo + size - 1;
1683 vg_assert(a_mid_hi >= a_mid_lo);
1684 if (addr < a_mid_lo) { hi = mid-1; continue; }
1685 if (addr > a_mid_hi) { lo = mid+1; continue; }
1686 vg_assert(addr >= a_mid_lo && addr <= a_mid_hi);
1691 /*--------------------------------------------------------------------*/
1693 /*--------------------------------------------------------------------*/