2 /*--------------------------------------------------------------------*/
3 /*--- Management, printing, etc, of errors and suppressions. ---*/
4 /*--- mc_errors.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors.
11 Copyright (C) 2000-2010 Julian Seward
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_tool_basics.h"
33 #include "pub_tool_hashtable.h" // For mc_include.h
34 #include "pub_tool_libcbase.h"
35 #include "pub_tool_libcassert.h"
36 #include "pub_tool_libcprint.h"
37 #include "pub_tool_machine.h"
38 #include "pub_tool_mallocfree.h"
39 #include "pub_tool_options.h"
40 #include "pub_tool_replacemalloc.h"
41 #include "pub_tool_tooliface.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset)
44 #include "pub_tool_xarray.h"
47 #include "mc_include.h"
50 /*------------------------------------------------------------*/
51 /*--- Error types ---*/
52 /*------------------------------------------------------------*/
54 /* See comment in mc_include.h */
55 Bool MC_(any_value_errors) = False;
58 // Different kinds of blocks.
67 /* ------------------ Addresses -------------------- */
69 /* The classification of a faulting address. */
72 Addr_Undescribed, // as-yet unclassified
73 Addr_Unknown, // classification yielded nothing useful
74 Addr_Block, // in malloc'd/free'd block
75 Addr_Stack, // on a thread's stack
76 Addr_DataSym, // in a global data sym
77 Addr_Variable, // variable described by the debug info
78 Addr_SectKind // last-ditch classification attempt
89 // As-yet unclassified.
90 struct { } Undescribed;
94 ThreadId tid; // Which thread's stack?
97 // This covers heap blocks (normal and from mempools) and user-defined
100 BlockKind block_kind;
101 Char* block_desc; // "block", "mempool" or user-defined
104 ExeContext* lastchange;
107 // In a global .data symbol. This holds the first 127 chars of
108 // the variable's name (zero terminated), plus a (memory) offset.
114 // Is described by Dwarf debug info. XArray*s of HChar.
116 XArray* /* of HChar */ descr1;
117 XArray* /* of HChar */ descr2;
120 // Could only narrow it down to be the PLT/GOT/etc of a given
121 // object. Better than nothing, perhaps.
127 // Classification yielded nothing useful.
133 /* ------------------ Errors ----------------------- */
135 /* What kind of error it is. */
155 typedef struct _MC_Error MC_Error;
158 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
162 // Use of an undefined value:
163 // - as a pointer in a load or store
164 // - as a jump target
166 SizeT szB; // size of value in bytes
168 UInt otag; // origin tag
169 ExeContext* origin_ec; // filled in later
172 // Use of an undefined value in a conditional branch or move.
175 UInt otag; // origin tag
176 ExeContext* origin_ec; // filled in later
179 // Addressability error in core (signal-handling) operation.
180 // It would be good to get rid of this error kind, merge it with
181 // another one somehow.
185 // Use of an unaddressable memory location in a load or store.
187 Bool isWrite; // read or write?
188 SizeT szB; // not used for exec (jump) errors
189 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
193 // Jump to an unaddressable memory location.
198 // System call register input contains undefined bytes.
201 UInt otag; // origin tag
202 ExeContext* origin_ec; // filled in later
205 // System call memory input contains undefined/unaddressable bytes
207 Bool isAddrErr; // Addressability or definedness error?
210 UInt otag; // origin tag
211 ExeContext* origin_ec; // filled in later
214 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
216 Bool isAddrErr; // Addressability or definedness error?
219 UInt otag; // origin tag
220 ExeContext* origin_ec; // filled in later
223 // Program tried to free() something that's not a heap block (this
224 // covers double-frees). */
229 // Program allocates heap block with one function
230 // (malloc/new/new[]/custom) and deallocates with not the matching one.
235 // Call to strcpy, memcpy, etc, with overlapping blocks.
237 Addr src; // Source block
238 Addr dst; // Destination block
239 Int szB; // Size in bytes; 0 if unused.
245 UInt n_total_records;
249 // A memory pool error.
258 /*------------------------------------------------------------*/
259 /*--- Printing errors ---*/
260 /*------------------------------------------------------------*/
262 /* This is the "this error is due to be printed shortly; so have a
263 look at it and print any preamble you want" function. Which, in
264 Memcheck, we don't use. Hence a no-op.
266 void MC_(before_pp_Error) ( Error* err ) {
269 /* Do a printf-style operation on either the XML or normal output
270 channel, depending on the setting of VG_(clo_xml).
272 static void emit_WRK ( HChar* format, va_list vargs )
275 VG_(vprintf_xml)(format, vargs);
277 VG_(vmessage)(Vg_UserMsg, format, vargs);
280 static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
281 static void emit ( HChar* format, ... )
284 va_start(vargs, format);
285 emit_WRK(format, vargs);
288 static void emiN ( HChar* format, ... ) /* NO FORMAT CHECK */
291 va_start(vargs, format);
292 emit_WRK(format, vargs);
297 static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
299 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
300 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
305 emit( "%sAddress 0x%llx is just below the stack ptr. "
306 "To suppress, use: --workaround-gcc296-bugs=yes%s\n",
307 xpre, (ULong)a, xpost );
309 emit( "%sAddress 0x%llx "
310 "is not stack'd, malloc'd or (recently) free'd%s\n",
311 xpre, (ULong)a, xpost );
317 emit( "%sAddress 0x%llx is on thread %d's stack%s\n",
318 xpre, (ULong)a, ai->Addr.Stack.tid, xpost );
322 SizeT block_szB = ai->Addr.Block.block_szB;
323 PtrdiffT rwoffset = ai->Addr.Block.rwoffset;
325 const Char* relative;
328 delta = (SizeT)(-rwoffset);
330 } else if (rwoffset >= block_szB) {
331 delta = rwoffset - block_szB;
338 "%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s\n",
340 a, delta, relative, ai->Addr.Block.block_desc,
342 ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
343 : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
347 VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
352 emiN( "%sAddress 0x%llx is %llu bytes "
353 "inside data symbol \"%t\"%s\n",
356 (ULong)ai->Addr.DataSym.offset,
357 ai->Addr.DataSym.name,
362 /* Note, no need for XML tags here, because descr1/2 will
363 already have <auxwhat> or <xauxwhat>s on them, in XML
365 if (ai->Addr.Variable.descr1)
367 VG_(clo_xml) ? " " : " ",
368 (HChar*)VG_(indexXA)(ai->Addr.Variable.descr1, 0) );
369 if (ai->Addr.Variable.descr2)
371 VG_(clo_xml) ? " " : " ",
372 (HChar*)VG_(indexXA)(ai->Addr.Variable.descr2, 0) );
376 emiN( "%sAddress 0x%llx is in the %t segment of %t%s\n",
379 VG_(pp_SectKind)(ai->Addr.SectKind.kind),
380 ai->Addr.SectKind.objname,
385 VG_(tool_panic)("mc_pp_AddrInfo");
389 static const HChar* str_leak_lossmode ( Reachedness lossmode )
391 const HChar *loss = "?";
393 case Unreached: loss = "definitely lost"; break;
394 case IndirectLeak: loss = "indirectly lost"; break;
395 case Possible: loss = "possibly lost"; break;
396 case Reachable: loss = "still reachable"; break;
401 static const HChar* xml_leak_kind ( Reachedness lossmode )
403 const HChar *loss = "?";
405 case Unreached: loss = "Leak_DefinitelyLost"; break;
406 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
407 case Possible: loss = "Leak_PossiblyLost"; break;
408 case Reachable: loss = "Leak_StillReachable"; break;
413 static void mc_pp_origin ( ExeContext* ec, UInt okind )
419 case MC_OKIND_STACK: src = " by a stack allocation"; break;
420 case MC_OKIND_HEAP: src = " by a heap allocation"; break;
421 case MC_OKIND_USER: src = " by a client request"; break;
422 case MC_OKIND_UNKNOWN: src = ""; break;
424 tl_assert(src); /* guards against invalid 'okind' */
427 emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
429 VG_(pp_ExeContext)( ec );
431 emit( " Uninitialised value was created%s\n", src);
432 VG_(pp_ExeContext)( ec );
436 void MC_(pp_Error) ( Error* err )
438 const Bool xml = VG_(clo_xml); /* a shorthand */
439 MC_Error* extra = VG_(get_error_extra)(err);
441 switch (VG_(get_error_kind)(err)) {
443 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
444 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
445 signal handler frame. --njn */
446 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
447 // the following code is untested. Bad.
449 emit( " <kind>CoreMemError</kind>\n" );
450 emiN( " <what>%t contains unaddressable byte(s)</what>\n",
451 VG_(get_error_string)(err));
452 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
454 emit( "%s contains unaddressable byte(s)\n",
455 VG_(get_error_string)(err));
456 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
461 MC_(any_value_errors) = True;
463 emit( " <kind>UninitValue</kind>\n" );
464 emit( " <what>Use of uninitialised value of size %ld</what>\n",
465 extra->Err.Value.szB );
466 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
467 if (extra->Err.Value.origin_ec)
468 mc_pp_origin( extra->Err.Value.origin_ec,
469 extra->Err.Value.otag & 3 );
471 /* Could also show extra->Err.Cond.otag if debugging origin
473 emit( "Use of uninitialised value of size %ld\n",
474 extra->Err.Value.szB );
475 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
476 if (extra->Err.Value.origin_ec)
477 mc_pp_origin( extra->Err.Value.origin_ec,
478 extra->Err.Value.otag & 3 );
483 MC_(any_value_errors) = True;
485 emit( " <kind>UninitCondition</kind>\n" );
486 emit( " <what>Conditional jump or move depends"
487 " on uninitialised value(s)</what>\n" );
488 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
489 if (extra->Err.Cond.origin_ec)
490 mc_pp_origin( extra->Err.Cond.origin_ec,
491 extra->Err.Cond.otag & 3 );
493 /* Could also show extra->Err.Cond.otag if debugging origin
495 emit( "Conditional jump or move depends"
496 " on uninitialised value(s)\n" );
497 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
498 if (extra->Err.Cond.origin_ec)
499 mc_pp_origin( extra->Err.Cond.origin_ec,
500 extra->Err.Cond.otag & 3 );
505 MC_(any_value_errors) = True;
507 emit( " <kind>SyscallParam</kind>\n" );
508 emiN( " <what>Syscall param %t contains "
509 "uninitialised byte(s)</what>\n",
510 VG_(get_error_string)(err) );
511 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
512 if (extra->Err.RegParam.origin_ec)
513 mc_pp_origin( extra->Err.RegParam.origin_ec,
514 extra->Err.RegParam.otag & 3 );
516 emit( "Syscall param %s contains uninitialised byte(s)\n",
517 VG_(get_error_string)(err) );
518 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
519 if (extra->Err.RegParam.origin_ec)
520 mc_pp_origin( extra->Err.RegParam.origin_ec,
521 extra->Err.RegParam.otag & 3 );
526 if (!extra->Err.MemParam.isAddrErr)
527 MC_(any_value_errors) = True;
529 emit( " <kind>SyscallParam</kind>\n" );
530 emiN( " <what>Syscall param %t points to %s byte(s)</what>\n",
531 VG_(get_error_string)(err),
532 extra->Err.MemParam.isAddrErr
533 ? "unaddressable" : "uninitialised" );
534 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
535 mc_pp_AddrInfo(VG_(get_error_address)(err),
536 &extra->Err.MemParam.ai, False);
537 if (extra->Err.MemParam.origin_ec
538 && !extra->Err.MemParam.isAddrErr)
539 mc_pp_origin( extra->Err.MemParam.origin_ec,
540 extra->Err.MemParam.otag & 3 );
542 emit( "Syscall param %s points to %s byte(s)\n",
543 VG_(get_error_string)(err),
544 extra->Err.MemParam.isAddrErr
545 ? "unaddressable" : "uninitialised" );
546 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
547 mc_pp_AddrInfo(VG_(get_error_address)(err),
548 &extra->Err.MemParam.ai, False);
549 if (extra->Err.MemParam.origin_ec
550 && !extra->Err.MemParam.isAddrErr)
551 mc_pp_origin( extra->Err.MemParam.origin_ec,
552 extra->Err.MemParam.otag & 3 );
557 if (!extra->Err.User.isAddrErr)
558 MC_(any_value_errors) = True;
560 emit( " <kind>ClientCheck</kind>\n" );
561 emit( " <what>%s byte(s) found "
562 "during client check request</what>\n",
563 extra->Err.User.isAddrErr
564 ? "Unaddressable" : "Uninitialised" );
565 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
566 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
568 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
569 mc_pp_origin( extra->Err.User.origin_ec,
570 extra->Err.User.otag & 3 );
572 emit( "%s byte(s) found during client check request\n",
573 extra->Err.User.isAddrErr
574 ? "Unaddressable" : "Uninitialised" );
575 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
576 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
578 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
579 mc_pp_origin( extra->Err.User.origin_ec,
580 extra->Err.User.otag & 3 );
586 emit( " <kind>InvalidFree</kind>\n" );
587 emit( " <what>Invalid free() / delete / delete[]</what>\n" );
588 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
589 mc_pp_AddrInfo( VG_(get_error_address)(err),
590 &extra->Err.Free.ai, False );
592 emit( "Invalid free() / delete / delete[]\n" );
593 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
594 mc_pp_AddrInfo( VG_(get_error_address)(err),
595 &extra->Err.Free.ai, False );
599 case Err_FreeMismatch:
601 emit( " <kind>MismatchedFree</kind>\n" );
602 emit( " <what>Mismatched free() / delete / delete []</what>\n" );
603 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
604 mc_pp_AddrInfo(VG_(get_error_address)(err),
605 &extra->Err.FreeMismatch.ai, False);
607 emit( "Mismatched free() / delete / delete []\n" );
608 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
609 mc_pp_AddrInfo(VG_(get_error_address)(err),
610 &extra->Err.FreeMismatch.ai, False);
616 emit( " <kind>Invalid%s</kind>\n",
617 extra->Err.Addr.isWrite ? "Write" : "Read" );
618 emit( " <what>Invalid %s of size %ld</what>\n",
619 extra->Err.Addr.isWrite ? "write" : "read",
620 extra->Err.Addr.szB );
621 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
622 mc_pp_AddrInfo( VG_(get_error_address)(err),
624 extra->Err.Addr.maybe_gcc );
626 emit( "Invalid %s of size %ld\n",
627 extra->Err.Addr.isWrite ? "write" : "read",
628 extra->Err.Addr.szB );
629 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
631 mc_pp_AddrInfo( VG_(get_error_address)(err),
633 extra->Err.Addr.maybe_gcc );
639 emit( " <kind>InvalidJump</kind>\n" );
640 emit( " <what>Jump to the invalid address stated "
641 "on the next line</what>\n" );
642 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
643 mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
646 emit( "Jump to the invalid address stated on the next line\n" );
647 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
648 mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
655 emit( " <kind>Overlap</kind>\n" );
656 if (extra->Err.Overlap.szB == 0) {
657 emiN( " <what>Source and destination overlap "
658 "in %t(%#lx, %#lx)\n</what>\n",
659 VG_(get_error_string)(err),
660 extra->Err.Overlap.dst, extra->Err.Overlap.src );
662 emit( " <what>Source and destination overlap "
663 "in %s(%#lx, %#lx, %d)</what>\n",
664 VG_(get_error_string)(err),
665 extra->Err.Overlap.dst, extra->Err.Overlap.src,
666 extra->Err.Overlap.szB );
668 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
670 if (extra->Err.Overlap.szB == 0) {
671 emiN( "Source and destination overlap in %t(%#lx, %#lx)\n",
672 VG_(get_error_string)(err),
673 extra->Err.Overlap.dst, extra->Err.Overlap.src );
675 emit( "Source and destination overlap in %s(%#lx, %#lx, %d)\n",
676 VG_(get_error_string)(err),
677 extra->Err.Overlap.dst, extra->Err.Overlap.src,
678 extra->Err.Overlap.szB );
680 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
684 case Err_IllegalMempool:
685 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
686 // the following code is untested. Bad.
688 emit( " <kind>InvalidMemPool</kind>\n" );
689 emit( " <what>Illegal memory pool address</what>\n" );
690 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
691 mc_pp_AddrInfo( VG_(get_error_address)(err),
692 &extra->Err.IllegalMempool.ai, False );
694 emit( "Illegal memory pool address\n" );
695 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
696 mc_pp_AddrInfo( VG_(get_error_address)(err),
697 &extra->Err.IllegalMempool.ai, False );
702 UInt n_this_record = extra->Err.Leak.n_this_record;
703 UInt n_total_records = extra->Err.Leak.n_total_records;
704 LossRecord* lr = extra->Err.Leak.lr;
706 emit(" <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
707 if (lr->indirect_szB > 0) {
708 emit( " <xwhat>\n" );
709 emit( " <text>%'lu (%'lu direct, %'lu indirect) bytes "
711 " are %s in loss record %'u of %'u</text>\n",
712 lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
714 str_leak_lossmode(lr->key.state),
715 n_this_record, n_total_records );
716 // Nb: don't put commas in these XML numbers
717 emit( " <leakedbytes>%lu</leakedbytes>\n",
718 lr->szB + lr->indirect_szB );
719 emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
720 emit( " </xwhat>\n" );
722 emit( " <xwhat>\n" );
723 emit( " <text>%'lu bytes in %'u blocks"
724 " are %s in loss record %'u of %'u</text>\n",
725 lr->szB, lr->num_blocks,
726 str_leak_lossmode(lr->key.state),
727 n_this_record, n_total_records );
728 emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
729 emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
730 emit( " </xwhat>\n" );
732 VG_(pp_ExeContext)(lr->key.allocated_at);
733 } else { /* ! if (xml) */
734 if (lr->indirect_szB > 0) {
736 "%'lu (%'lu direct, %'lu indirect) bytes in %'u blocks"
737 " are %s in loss record %'u of %'u\n",
738 lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
739 lr->num_blocks, str_leak_lossmode(lr->key.state),
740 n_this_record, n_total_records
744 "%'lu bytes in %'u blocks are %s in loss record %'u of %'u\n",
745 lr->szB, lr->num_blocks, str_leak_lossmode(lr->key.state),
746 n_this_record, n_total_records
749 VG_(pp_ExeContext)(lr->key.allocated_at);
755 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
756 VG_(get_error_kind)(err));
757 VG_(tool_panic)("unknown error code in mc_pp_Error)");
761 /*------------------------------------------------------------*/
762 /*--- Recording errors ---*/
763 /*------------------------------------------------------------*/
765 /* These many bytes below %ESP are considered addressable if we're
766 doing the --workaround-gcc296-bugs hack. */
767 #define VG_GCC296_BUG_STACK_SLOP 1024
769 /* Is this address within some small distance below %ESP? Used only
770 for the --workaround-gcc296-bugs kludge. */
771 static Bool is_just_below_ESP( Addr esp, Addr aa )
773 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
779 /* --- Called from generated and non-generated code --- */
781 void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
787 if (MC_(in_ignored_range)(a))
790 # if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
791 /* AIX zero-page handling. On AIX, reads from page zero are,
792 bizarrely enough, legitimate. Writes to page zero aren't,
793 though. Since memcheck can't distinguish reads from writes, the
794 best we can do is to 'act normal' and mark the A bits in the
795 normal way as noaccess, but then hide any reads from that page
796 that get reported here. */
797 if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
800 /* Appalling AIX hack. It suppresses reads done by glink
801 fragments. Getting rid of this would require figuring out
802 somehow where the referenced data areas are (and their
804 if ((!isWrite) && szB == sizeof(Word)) {
806 UInt* pc = (UInt*)VG_(get_IP)(tid);
807 if (sizeof(Word) == 4) {
808 i1 = 0x800c0000; /* lwz r0,0(r12) */
809 i2 = 0x804c0004; /* lwz r2,4(r12) */
811 i1 = 0xe80c0000; /* ld r0,0(r12) */
812 i2 = 0xe84c0008; /* ld r2,8(r12) */
814 if (pc[0] == i1 && pc[1] == i2) return;
815 if (pc[0] == i2 && pc[-1] == i1) return;
819 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
821 /* If this is caused by an access immediately below %ESP, and the
822 user asks nicely, we just ignore it. */
823 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
826 extra.Err.Addr.isWrite = isWrite;
827 extra.Err.Addr.szB = szB;
828 extra.Err.Addr.maybe_gcc = just_below_esp;
829 extra.Err.Addr.ai.tag = Addr_Undescribed;
830 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
833 void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
836 tl_assert( MC_(clo_mc_level) >= 2 );
838 tl_assert( MC_(clo_mc_level) == 3 );
839 extra.Err.Value.szB = szB;
840 extra.Err.Value.otag = otag;
841 extra.Err.Value.origin_ec = NULL; /* Filled in later */
842 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
845 void MC_(record_cond_error) ( ThreadId tid, UInt otag )
848 tl_assert( MC_(clo_mc_level) >= 2 );
850 tl_assert( MC_(clo_mc_level) == 3 );
851 extra.Err.Cond.otag = otag;
852 extra.Err.Cond.origin_ec = NULL; /* Filled in later */
853 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
856 /* --- Called from non-generated code --- */
858 /* This is for memory errors in signal-related memory. */
859 void MC_(record_core_mem_error) ( ThreadId tid, Char* msg )
861 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
864 void MC_(record_regparam_error) ( ThreadId tid, Char* msg, UInt otag )
867 tl_assert(VG_INVALID_THREADID != tid);
869 tl_assert( MC_(clo_mc_level) == 3 );
870 extra.Err.RegParam.otag = otag;
871 extra.Err.RegParam.origin_ec = NULL; /* Filled in later */
872 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
875 void MC_(record_memparam_error) ( ThreadId tid, Addr a,
876 Bool isAddrErr, Char* msg, UInt otag )
879 tl_assert(VG_INVALID_THREADID != tid);
881 tl_assert( MC_(clo_mc_level) >= 2 );
883 tl_assert( MC_(clo_mc_level) == 3 );
884 tl_assert( !isAddrErr );
886 extra.Err.MemParam.isAddrErr = isAddrErr;
887 extra.Err.MemParam.ai.tag = Addr_Undescribed;
888 extra.Err.MemParam.otag = otag;
889 extra.Err.MemParam.origin_ec = NULL; /* Filled in later */
890 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
893 void MC_(record_jump_error) ( ThreadId tid, Addr a )
896 tl_assert(VG_INVALID_THREADID != tid);
897 extra.Err.Jump.ai.tag = Addr_Undescribed;
898 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
901 void MC_(record_free_error) ( ThreadId tid, Addr a )
904 tl_assert(VG_INVALID_THREADID != tid);
905 extra.Err.Free.ai.tag = Addr_Undescribed;
906 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
909 void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
912 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
913 tl_assert(VG_INVALID_THREADID != tid);
914 ai->tag = Addr_Block;
915 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
916 ai->Addr.Block.block_desc = "block";
917 ai->Addr.Block.block_szB = mc->szB;
918 ai->Addr.Block.rwoffset = 0;
919 ai->Addr.Block.lastchange = mc->where;
920 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
924 void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
927 tl_assert(VG_INVALID_THREADID != tid);
928 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
929 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
932 void MC_(record_overlap_error) ( ThreadId tid, Char* function,
933 Addr src, Addr dst, SizeT szB )
936 tl_assert(VG_INVALID_THREADID != tid);
937 extra.Err.Overlap.src = src;
938 extra.Err.Overlap.dst = dst;
939 extra.Err.Overlap.szB = szB;
940 VG_(maybe_record_error)(
941 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
944 Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
945 UInt n_total_records, LossRecord* lr,
946 Bool print_record, Bool count_error )
949 extra.Err.Leak.n_this_record = n_this_record;
950 extra.Err.Leak.n_total_records = n_total_records;
951 extra.Err.Leak.lr = lr;
953 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
954 lr->key.allocated_at, print_record,
955 /*allow_GDB_attach*/False, count_error );
958 void MC_(record_user_error) ( ThreadId tid, Addr a,
959 Bool isAddrErr, UInt otag )
963 tl_assert(!isAddrErr);
964 tl_assert( MC_(clo_mc_level) == 3 );
967 tl_assert( MC_(clo_mc_level) >= 2 );
969 tl_assert(VG_INVALID_THREADID != tid);
970 extra.Err.User.isAddrErr = isAddrErr;
971 extra.Err.User.ai.tag = Addr_Undescribed;
972 extra.Err.User.otag = otag;
973 extra.Err.User.origin_ec = NULL; /* Filled in later */
974 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
977 /*------------------------------------------------------------*/
978 /*--- Other error operations ---*/
979 /*------------------------------------------------------------*/
981 /* Compare error contexts, to detect duplicates. Note that if they
982 are otherwise the same, the faulting addrs and associated rwoffsets
983 are allowed to be different. */
984 Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
986 MC_Error* extra1 = VG_(get_error_extra)(e1);
987 MC_Error* extra2 = VG_(get_error_extra)(e2);
989 /* Guaranteed by calling function */
990 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
992 switch (VG_(get_error_kind)(e1)) {
995 e1s = VG_(get_error_string)(e1);
996 e2s = VG_(get_error_string)(e2);
997 if (e1s == e2s) return True;
998 if (VG_STREQ(e1s, e2s)) return True;
1003 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
1005 // Perhaps we should also check the addrinfo.akinds for equality.
1006 // That would result in more error reports, but only in cases where
1007 // a register contains uninitialised bytes and points to memory
1008 // containing uninitialised bytes. Currently, the 2nd of those to be
1009 // detected won't be reported. That is (nearly?) always the memory
1010 // error, which is good.
1012 if (!VG_STREQ(VG_(get_error_string)(e1),
1013 VG_(get_error_string)(e2))) return False;
1016 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
1020 case Err_FreeMismatch:
1022 case Err_IllegalMempool:
1028 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
1032 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
1036 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
1037 "since it's handled with VG_(unique_error)()!");
1040 VG_(printf)("Error:\n unknown error code %d\n",
1041 VG_(get_error_kind)(e1));
1042 VG_(tool_panic)("unknown error code in mc_eq_Error");
1046 /* Function used when searching MC_Chunk lists */
1047 static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
1049 // Nb: this is not quite right! It assumes that the heap block has
1050 // a redzone of size MC_MALLOC_REDZONE_SZB. That's true for malloc'd
1051 // blocks, but not necessarily true for custom-alloc'd blocks. So
1052 // in some cases this could result in an incorrect description (eg.
1053 // saying "12 bytes after block A" when really it's within block B.
1054 // Fixing would require adding redzone size to MC_Chunks, though.
1055 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1056 MC_MALLOC_REDZONE_SZB );
1059 // Forward declaration
1060 static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
1063 /* Describe an address as best you can, for error messages,
1064 putting the result in ai. */
1065 static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
1069 Addr stack_min, stack_max;
1072 tl_assert(Addr_Undescribed == ai->tag);
1074 /* -- Perhaps it's a user-def'd block? -- */
1075 if (client_block_maybe_describe( a, ai )) {
1078 /* -- Search for a recently freed block which might bracket it. -- */
1079 mc = MC_(get_freed_list_head)();
1081 if (addr_is_in_MC_Chunk(mc, a)) {
1082 ai->tag = Addr_Block;
1083 ai->Addr.Block.block_kind = Block_Freed;
1084 ai->Addr.Block.block_desc = "block";
1085 ai->Addr.Block.block_szB = mc->szB;
1086 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1087 ai->Addr.Block.lastchange = mc->where;
1092 /* -- Search for a currently malloc'd block which might bracket it. -- */
1093 VG_(HT_ResetIter)(MC_(malloc_list));
1094 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1095 if (addr_is_in_MC_Chunk(mc, a)) {
1096 ai->tag = Addr_Block;
1097 ai->Addr.Block.block_kind = Block_Mallocd;
1098 ai->Addr.Block.block_desc = "block";
1099 ai->Addr.Block.block_szB = mc->szB;
1100 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1101 ai->Addr.Block.lastchange = mc->where;
1105 /* -- Perhaps the variable type/location data describes it? -- */
1106 ai->Addr.Variable.descr1
1107 = VG_(newXA)( VG_(malloc), "mc.da.descr1",
1108 VG_(free), sizeof(HChar) );
1109 ai->Addr.Variable.descr2
1110 = VG_(newXA)( VG_(malloc), "mc.da.descr2",
1111 VG_(free), sizeof(HChar) );
1113 (void) VG_(get_data_description)( ai->Addr.Variable.descr1,
1114 ai->Addr.Variable.descr2, a );
1115 /* If there's nothing in descr1/2, free them. Why is it safe to do
1116 VG_(indexXA) at zero here? Because VG_(get_data_description)
1117 guarantees to zero terminate descr1/2 regardless of the outcome
1118 of the call. So there's always at least one element in each XA
1121 if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr1, 0 ))) {
1122 VG_(deleteXA)( ai->Addr.Variable.descr1 );
1123 ai->Addr.Variable.descr1 = NULL;
1125 if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr2, 0 ))) {
1126 VG_(deleteXA)( ai->Addr.Variable.descr2 );
1127 ai->Addr.Variable.descr2 = NULL;
1129 /* Assume (assert) that VG_(get_data_description) fills in descr1
1130 before it fills in descr2 */
1131 if (ai->Addr.Variable.descr1 == NULL)
1132 tl_assert(ai->Addr.Variable.descr2 == NULL);
1133 /* So did we get lucky? */
1134 if (ai->Addr.Variable.descr1 != NULL) {
1135 ai->tag = Addr_Variable;
1138 /* -- Have a look at the low level data symbols - perhaps it's in
1140 VG_(memset)( &ai->Addr.DataSym.name,
1141 0, sizeof(ai->Addr.DataSym.name));
1142 if (VG_(get_datasym_and_offset)(
1143 a, &ai->Addr.DataSym.name[0],
1144 sizeof(ai->Addr.DataSym.name)-1,
1145 &ai->Addr.DataSym.offset )) {
1146 ai->tag = Addr_DataSym;
1147 tl_assert( ai->Addr.DataSym.name
1148 [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
1151 /* -- Perhaps it's on a thread's stack? -- */
1152 VG_(thread_stack_reset_iter)(&tid);
1153 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
1154 if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
1155 ai->tag = Addr_Stack;
1156 ai->Addr.Stack.tid = tid;
1160 /* -- last ditch attempt at classification -- */
1161 tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
1162 VG_(memset)( &ai->Addr.SectKind.objname,
1163 0, sizeof(ai->Addr.SectKind.objname));
1164 VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
1165 sect = VG_(DebugInfo_sect_kind)( &ai->Addr.SectKind.objname[0],
1166 sizeof(ai->Addr.SectKind.objname)-1, a);
1167 if (sect != Vg_SectUnknown) {
1168 ai->tag = Addr_SectKind;
1169 ai->Addr.SectKind.kind = sect;
1170 tl_assert( ai->Addr.SectKind.objname
1171 [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
1174 /* -- Clueless ... -- */
1175 ai->tag = Addr_Unknown;
1179 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
1180 does not refer to a known origin. */
1181 static void update_origin ( /*OUT*/ExeContext** origin_ec,
1184 UInt ecu = otag & ~3;
1186 if (VG_(is_plausible_ECU)(ecu)) {
1187 *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
1191 /* Updates the copy with address info if necessary (but not for all errors). */
1192 UInt MC_(update_Error_extra)( Error* err )
1194 MC_Error* extra = VG_(get_error_extra)(err);
1196 switch (VG_(get_error_kind)(err)) {
1197 // These ones don't have addresses associated with them, and so don't
1198 // need any updating.
1203 // For Err_Leaks the returned size does not matter -- they are always
1204 // shown with VG_(unique_error)() so they 'extra' not copied. But
1205 // we make it consistent with the others.
1207 return sizeof(MC_Error);
1209 // For value errors, get the ExeContext corresponding to the
1210 // origin tag. Note that it is a kludge to assume that
1211 // a length-1 trace indicates a stack origin. FIXME.
1213 update_origin( &extra->Err.Value.origin_ec,
1214 extra->Err.Value.otag );
1215 return sizeof(MC_Error);
1217 update_origin( &extra->Err.Cond.origin_ec,
1218 extra->Err.Cond.otag );
1219 return sizeof(MC_Error);
1221 update_origin( &extra->Err.RegParam.origin_ec,
1222 extra->Err.RegParam.otag );
1223 return sizeof(MC_Error);
1225 // These ones always involve a memory address.
1227 describe_addr ( VG_(get_error_address)(err),
1228 &extra->Err.Addr.ai );
1229 return sizeof(MC_Error);
1231 describe_addr ( VG_(get_error_address)(err),
1232 &extra->Err.MemParam.ai );
1233 update_origin( &extra->Err.MemParam.origin_ec,
1234 extra->Err.MemParam.otag );
1235 return sizeof(MC_Error);
1237 describe_addr ( VG_(get_error_address)(err),
1238 &extra->Err.Jump.ai );
1239 return sizeof(MC_Error);
1241 describe_addr ( VG_(get_error_address)(err),
1242 &extra->Err.User.ai );
1243 update_origin( &extra->Err.User.origin_ec,
1244 extra->Err.User.otag );
1245 return sizeof(MC_Error);
1247 describe_addr ( VG_(get_error_address)(err),
1248 &extra->Err.Free.ai );
1249 return sizeof(MC_Error);
1250 case Err_IllegalMempool:
1251 describe_addr ( VG_(get_error_address)(err),
1252 &extra->Err.IllegalMempool.ai );
1253 return sizeof(MC_Error);
1255 // Err_FreeMismatches have already had their address described; this is
1256 // possible because we have the MC_Chunk on hand when the error is
1257 // detected. However, the address may be part of a user block, and if so
1258 // we override the pre-determined description with a user block one.
1259 case Err_FreeMismatch: {
1260 tl_assert(extra && Block_Mallocd ==
1261 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
1262 (void)client_block_maybe_describe( VG_(get_error_address)(err),
1263 &extra->Err.FreeMismatch.ai );
1264 return sizeof(MC_Error);
1267 default: VG_(tool_panic)("mc_update_extra: bad errkind");
1271 // FIXME: does this perhaps want to live somewhere else
1273 static Bool client_block_maybe_describe( Addr a,
1274 /*OUT*/AddrInfo* ai )
1277 CGenBlock* cgbs = NULL;
1280 MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
1282 tl_assert(cgb_used == 0);
1284 /* Perhaps it's a general block ? */
1285 for (i = 0; i < cgb_used; i++) {
1286 if (cgbs[i].start == 0 && cgbs[i].size == 0)
1288 // Use zero as the redzone for client blocks.
1289 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
1290 /* OK - maybe it's a mempool, too? */
1291 MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
1292 (UWord)cgbs[i].start);
1294 if (mp->chunks != NULL) {
1296 VG_(HT_ResetIter)(mp->chunks);
1297 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
1298 if (addr_is_in_MC_Chunk(mc, a)) {
1299 ai->tag = Addr_Block;
1300 ai->Addr.Block.block_kind = Block_MempoolChunk;
1301 ai->Addr.Block.block_desc = "block";
1302 ai->Addr.Block.block_szB = mc->szB;
1303 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1304 ai->Addr.Block.lastchange = mc->where;
1309 ai->tag = Addr_Block;
1310 ai->Addr.Block.block_kind = Block_Mempool;
1311 ai->Addr.Block.block_desc = "mempool";
1312 ai->Addr.Block.block_szB = cgbs[i].size;
1313 ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
1314 ai->Addr.Block.lastchange = cgbs[i].where;
1317 ai->tag = Addr_Block;
1318 ai->Addr.Block.block_kind = Block_UserG;
1319 ai->Addr.Block.block_desc = cgbs[i].desc;
1320 ai->Addr.Block.block_szB = cgbs[i].size;
1321 ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
1322 ai->Addr.Block.lastchange = cgbs[i].where;
/*------------------------------------------------------------*/
/*--- Suppressions                                          ---*/
/*------------------------------------------------------------*/
/* The kinds of suppression Memcheck recognises in a suppression file. */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;
1357 Bool MC_(is_recognised_suppression) ( Char* name, Supp* su )
1361 if (VG_STREQ(name, "Param")) skind = ParamSupp;
1362 else if (VG_STREQ(name, "User")) skind = UserSupp;
1363 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
1364 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
1365 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
1366 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
1367 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
1368 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
1369 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
1370 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
1371 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
1372 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
1373 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
1374 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
1375 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
1376 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1377 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1378 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1379 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1380 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1384 VG_(set_supp_kind)(su, skind);
1388 Bool MC_(read_extra_suppression_info) ( Int fd, Char** bufpp,
1389 SizeT* nBufp, Supp *su )
1393 if (VG_(get_supp_kind)(su) == ParamSupp) {
1394 eof = VG_(get_line) ( fd, bufpp, nBufp, NULL );
1395 if (eof) return False;
1396 VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
1401 Bool MC_(error_matches_suppression) ( Error* err, Supp* su )
1404 MC_Error* extra = VG_(get_error_extra)(err);
1405 ErrorKind ekind = VG_(get_error_kind )(err);
1407 switch (VG_(get_supp_kind)(su)) {
1409 return ((ekind == Err_RegParam || ekind == Err_MemParam)
1410 && VG_STREQ(VG_(get_error_string)(err),
1411 VG_(get_supp_string)(su)));
1414 return (ekind == Err_User);
1417 return (ekind == Err_CoreMem
1418 && VG_STREQ(VG_(get_error_string)(err),
1419 VG_(get_supp_string)(su)));
1421 case Value1Supp: su_szB = 1; goto value_case;
1422 case Value2Supp: su_szB = 2; goto value_case;
1423 case Value4Supp: su_szB = 4; goto value_case;
1424 case Value8Supp: su_szB = 8; goto value_case;
1425 case Value16Supp:su_szB =16; goto value_case;
1427 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
1430 return (ekind == Err_Cond);
1432 case Addr1Supp: su_szB = 1; goto addr_case;
1433 case Addr2Supp: su_szB = 2; goto addr_case;
1434 case Addr4Supp: su_szB = 4; goto addr_case;
1435 case Addr8Supp: su_szB = 8; goto addr_case;
1436 case Addr16Supp:su_szB =16; goto addr_case;
1438 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
1441 return (ekind == Err_Jump);
1444 return (ekind == Err_Free || ekind == Err_FreeMismatch);
1447 return (ekind == Err_Overlap);
1450 return (ekind == Err_Leak);
1453 return (ekind == Err_IllegalMempool);
1456 VG_(printf)("Error:\n"
1457 " unknown suppression type %d\n",
1458 VG_(get_supp_kind)(su));
1459 VG_(tool_panic)("unknown suppression type in "
1460 "MC_(error_matches_suppression)");
1464 Char* MC_(get_error_name) ( Error* err )
1466 switch (VG_(get_error_kind)(err)) {
1467 case Err_RegParam: return "Param";
1468 case Err_MemParam: return "Param";
1469 case Err_User: return "User";
1470 case Err_FreeMismatch: return "Free";
1471 case Err_IllegalMempool: return "Mempool";
1472 case Err_Free: return "Free";
1473 case Err_Jump: return "Jump";
1474 case Err_CoreMem: return "CoreMem";
1475 case Err_Overlap: return "Overlap";
1476 case Err_Leak: return "Leak";
1477 case Err_Cond: return "Cond";
1479 MC_Error* extra = VG_(get_error_extra)(err);
1480 switch ( extra->Err.Addr.szB ) {
1481 case 1: return "Addr1";
1482 case 2: return "Addr2";
1483 case 4: return "Addr4";
1484 case 8: return "Addr8";
1485 case 16: return "Addr16";
1486 default: VG_(tool_panic)("unexpected size for Addr");
1490 MC_Error* extra = VG_(get_error_extra)(err);
1491 switch ( extra->Err.Value.szB ) {
1492 case 1: return "Value1";
1493 case 2: return "Value2";
1494 case 4: return "Value4";
1495 case 8: return "Value8";
1496 case 16: return "Value16";
1497 default: VG_(tool_panic)("unexpected size for Value");
1500 default: VG_(tool_panic)("get_error_name: unexpected type");
1504 Bool MC_(get_extra_suppression_info) ( Error* err,
1505 /*OUT*/Char* buf, Int nBuf )
1507 ErrorKind ekind = VG_(get_error_kind )(err);
1509 tl_assert(nBuf >= 16); // stay sane
1510 if (Err_RegParam == ekind || Err_MemParam == ekind) {
1511 Char* errstr = VG_(get_error_string)(err);
1513 VG_(snprintf)(buf, nBuf-1, "%s", errstr);
/*--------------------------------------------------------------------*/
/*--- end                                              mc_errors.c ---*/
/*--------------------------------------------------------------------*/