2 /*--------------------------------------------------------------------*/
3 /*--- Management, printing, etc, of errors and suppressions. ---*/
4 /*--- mc_errors.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of MemCheck, a heavyweight Valgrind tool for
9 detecting memory errors.
11 Copyright (C) 2000-2010 Julian Seward
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_tool_basics.h"
33 #include "pub_tool_gdbserver.h"
34 #include "pub_tool_hashtable.h" // For mc_include.h
35 #include "pub_tool_libcbase.h"
36 #include "pub_tool_libcassert.h"
37 #include "pub_tool_libcprint.h"
38 #include "pub_tool_machine.h"
39 #include "pub_tool_mallocfree.h"
40 #include "pub_tool_options.h"
41 #include "pub_tool_replacemalloc.h"
42 #include "pub_tool_tooliface.h"
43 #include "pub_tool_threadstate.h"
44 #include "pub_tool_debuginfo.h" // VG_(get_dataname_and_offset)
45 #include "pub_tool_xarray.h"
48 #include "mc_include.h"
51 /*------------------------------------------------------------*/
52 /*--- Error types ---*/
53 /*------------------------------------------------------------*/
55 /* See comment in mc_include.h */
/* Set True the first time any definedness (value) error is reported;
   read elsewhere (see mc_include.h) -- TODO confirm the consumer. */
56 Bool MC_(any_value_errors) = False;
59 // Different kinds of blocks.
68 /* ------------------ Addresses -------------------- */
70 /* The classification of a faulting address. */
/* NOTE(review): enum of AddrInfo tags; one per variant of the union below. */
73 Addr_Undescribed, // as-yet unclassified
74 Addr_Unknown, // classification yielded nothing useful
75 Addr_Block, // in malloc'd/free'd block
76 Addr_Stack, // on a thread's stack
77 Addr_DataSym, // in a global data sym
78 Addr_Variable, // variable described by the debug info
79 Addr_SectKind // last-ditch classification attempt
90 // As-yet unclassified.
91 struct { } Undescribed;
95 ThreadId tid; // Which thread's stack?
98 // This covers heap blocks (normal and from mempools) and user-defined
101 BlockKind block_kind;
102 Char* block_desc; // "block", "mempool" or user-defined
/* Where the block was allocated or freed (see describe_addr below,
   which fills this from MC_Chunk::where). */
105 ExeContext* lastchange;
108 // In a global .data symbol. This holds the first 127 chars of
109 // the variable's name (zero terminated), plus a (memory) offset.
115 // Is described by Dwarf debug info. XArray*s of HChar.
/* Either both are non-NULL, or descr2 is NULL; descr1 == NULL implies
   descr2 == NULL (asserted in describe_addr). */
117 XArray* /* of HChar */ descr1;
118 XArray* /* of HChar */ descr2;
121 // Could only narrow it down to be the PLT/GOT/etc of a given
122 // object. Better than nothing, perhaps.
128 // Classification yielded nothing useful.
134 /* ------------------ Errors ----------------------- */
136 /* What kind of error it is. */
156 typedef struct _MC_Error MC_Error;
159 // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
163 // Use of an undefined value:
164 // - as a pointer in a load or store
165 // - as a jump target
167 SizeT szB; // size of value in bytes
/* otag: low 2 bits encode the origin kind (MC_OKIND_*), the rest is an
   ECU -- see update_origin / mc_pp_origin which mask with ~3 and 3. */
169 UInt otag; // origin tag
170 ExeContext* origin_ec; // filled in later
173 // Use of an undefined value in a conditional branch or move.
176 UInt otag; // origin tag
177 ExeContext* origin_ec; // filled in later
180 // Addressability error in core (signal-handling) operation.
181 // It would be good to get rid of this error kind, merge it with
182 // another one somehow.
186 // Use of an unaddressable memory location in a load or store.
188 Bool isWrite; // read or write?
189 SizeT szB; // not used for exec (jump) errors
190 Bool maybe_gcc; // True if just below %esp -- could be a gcc bug
194 // Jump to an unaddressable memory location.
199 // System call register input contains undefined bytes.
202 UInt otag; // origin tag
203 ExeContext* origin_ec; // filled in later
206 // System call memory input contains undefined/unaddressable bytes
208 Bool isAddrErr; // Addressability or definedness error?
211 UInt otag; // origin tag
212 ExeContext* origin_ec; // filled in later
215 // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
217 Bool isAddrErr; // Addressability or definedness error?
220 UInt otag; // origin tag
221 ExeContext* origin_ec; // filled in later
224 // Program tried to free() something that's not a heap block (this
225 // covers double-frees). */
230 // Program allocates heap block with one function
231 // (malloc/new/new[]/custom) and deallocates with not the matching one.
236 // Call to strcpy, memcpy, etc, with overlapping blocks.
238 Addr src; // Source block
239 Addr dst; // Destination block
240 Int szB; // Size in bytes; 0 if unused.
246 UInt n_total_records;
250 // A memory pool error.
259 /*------------------------------------------------------------*/
260 /*--- Printing errors ---*/
261 /*------------------------------------------------------------*/
263 /* This is the "this error is due to be printed shortly; so have a
264 look at it and print any preamble you want" function. Which, in
265 Memcheck, we don't use. Hence a no-op.
267 void MC_(before_pp_Error) ( Error* err ) {
270 /* Do a printf-style operation on either the XML or normal output
271 channel, depending on the setting of VG_(clo_xml).
/* Route a vprintf-style message to the XML channel or the normal user
   channel, depending on VG_(clo_xml). */
273 static void emit_WRK ( HChar* format, va_list vargs )
276 VG_(vprintf_xml)(format, vargs);
278 VG_(vmessage)(Vg_UserMsg, format, vargs);
/* Varargs front-end to emit_WRK, with gcc printf-format checking. */
281 static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
282 static void emit ( HChar* format, ... )
285 va_start(vargs, format);
286 emit_WRK(format, vargs);
/* Same as emit(), but with no format check -- presumably needed because
   callers pass the non-standard %t (XML-escaped string) conversion,
   which PRINTF_CHECK would reject.  TODO confirm %t semantics. */
289 static void emiN ( HChar* format, ... ) /* NO FORMAT CHECK */
292 va_start(vargs, format);
293 emit_WRK(format, vargs);
/* Pretty-print the description in *ai of faulting address 'a', in either
   XML or plain form.  'maybe_gcc' selects the "just below the stack ptr"
   wording for the Addr_Unknown case. */
298 static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
/* xpre/xpost wrap each line in <auxwhat>...</auxwhat> when in XML mode. */
300 HChar* xpre = VG_(clo_xml) ? " <auxwhat>" : " ";
301 HChar* xpost = VG_(clo_xml) ? "</auxwhat>" : "";
306 emit( "%sAddress 0x%llx is just below the stack ptr. "
307 "To suppress, use: --workaround-gcc296-bugs=yes%s\n",
308 xpre, (ULong)a, xpost );
310 emit( "%sAddress 0x%llx "
311 "is not stack'd, malloc'd or (recently) free'd%s\n",
312 xpre, (ULong)a, xpost );
318 emit( "%sAddress 0x%llx is on thread %d's stack%s\n",
319 xpre, (ULong)a, ai->Addr.Stack.tid, xpost );
323 SizeT block_szB = ai->Addr.Block.block_szB;
324 PtrdiffT rwoffset = ai->Addr.Block.rwoffset;
326 const Char* relative;
/* delta = distance from the block: before the block if rwoffset < 0,
   past the end if rwoffset >= block_szB, else inside it. */
329 delta = (SizeT)(-rwoffset);
331 } else if (rwoffset >= block_szB) {
332 delta = rwoffset - block_szB;
339 "%sAddress 0x%lx is %'lu bytes %s a %s of size %'lu %s%s\n",
341 a, delta, relative, ai->Addr.Block.block_desc,
343 ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd"
344 : ai->Addr.Block.block_kind==Block_Freed ? "free'd"
348 VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
/* Addr_DataSym: emiN because of %t (escaped symbol name). */
353 emiN( "%sAddress 0x%llx is %llu bytes "
354 "inside data symbol \"%t\"%s\n",
357 (ULong)ai->Addr.DataSym.offset,
358 ai->Addr.DataSym.name,
363 /* Note, no need for XML tags here, because descr1/2 will
364 already have <auxwhat> or <xauxwhat>s on them, in XML
366 if (ai->Addr.Variable.descr1)
368 VG_(clo_xml) ? " " : " ",
369 (HChar*)VG_(indexXA)(ai->Addr.Variable.descr1, 0) );
370 if (ai->Addr.Variable.descr2)
372 VG_(clo_xml) ? " " : " ",
373 (HChar*)VG_(indexXA)(ai->Addr.Variable.descr2, 0) );
377 emiN( "%sAddress 0x%llx is in the %t segment of %t%s\n",
380 VG_(pp_SectKind)(ai->Addr.SectKind.kind),
381 ai->Addr.SectKind.objname,
/* Unreachable tag value: internal error. */
386 VG_(tool_panic)("mc_pp_AddrInfo");
/* Map a leak Reachedness to the human-readable phrase used in
   plain-text leak reports ("definitely lost" etc). */
390 static const HChar* str_leak_lossmode ( Reachedness lossmode )
392 const HChar *loss = "?";
394 case Unreached: loss = "definitely lost"; break;
395 case IndirectLeak: loss = "indirectly lost"; break;
396 case Possible: loss = "possibly lost"; break;
397 case Reachable: loss = "still reachable"; break;
/* Map a leak Reachedness to the <kind> tag value used in XML output.
   Must stay in sync with str_leak_lossmode above. */
402 static const HChar* xml_leak_kind ( Reachedness lossmode )
404 const HChar *loss = "?";
406 case Unreached: loss = "Leak_DefinitelyLost"; break;
407 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
408 case Possible: loss = "Leak_PossiblyLost"; break;
409 case Reachable: loss = "Leak_StillReachable"; break;
/* Print the origin of an uninitialised value: a one-line description of
   the origin kind ('okind', the low 2 bits of the otag) followed by the
   stored ExeContext 'ec'. */
414 static void mc_pp_origin ( ExeContext* ec, UInt okind )
420 case MC_OKIND_STACK: src = " by a stack allocation"; break;
421 case MC_OKIND_HEAP: src = " by a heap allocation"; break;
422 case MC_OKIND_USER: src = " by a client request"; break;
423 case MC_OKIND_UNKNOWN: src = ""; break;
425 tl_assert(src); /* guards against invalid 'okind' */
/* XML form ... */
428 emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
430 VG_(pp_ExeContext)( ec );
/* ... and plain-text form. */
432 emit( " Uninitialised value was created%s\n", src);
433 VG_(pp_ExeContext)( ec );
/* Top-level error printer.  Dispatches on the error kind and, for each
   kind, emits an XML or a plain-text rendering (the two branches are
   deliberately parallel).  Most kinds also print the ExeContext and,
   where an address is involved, a mc_pp_AddrInfo description; value
   errors additionally print their origin via mc_pp_origin. */
437 void MC_(pp_Error) ( Error* err )
439 const Bool xml = VG_(clo_xml); /* a shorthand */
440 MC_Error* extra = VG_(get_error_extra)(err);
442 switch (VG_(get_error_kind)(err)) {
444 /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
445 /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
446 signal handler frame. --njn */
447 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
448 // the following code is untested. Bad.
450 emit( " <kind>CoreMemError</kind>\n" );
451 emiN( " <what>%t contains unaddressable byte(s)</what>\n",
452 VG_(get_error_string)(err));
453 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
455 emit( "%s contains unaddressable byte(s)\n",
456 VG_(get_error_string)(err));
457 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
/* Err_Value: use of an uninitialised value of a known size. */
462 MC_(any_value_errors) = True;
464 emit( " <kind>UninitValue</kind>\n" );
465 emit( " <what>Use of uninitialised value of size %ld</what>\n",
466 extra->Err.Value.szB );
467 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
468 if (extra->Err.Value.origin_ec)
469 mc_pp_origin( extra->Err.Value.origin_ec,
470 extra->Err.Value.otag & 3 );
472 /* Could also show extra->Err.Cond.otag if debugging origin
474 emit( "Use of uninitialised value of size %ld\n",
475 extra->Err.Value.szB );
476 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
477 if (extra->Err.Value.origin_ec)
478 mc_pp_origin( extra->Err.Value.origin_ec,
479 extra->Err.Value.otag & 3 );
/* Err_Cond: conditional jump/move on uninitialised value(s). */
484 MC_(any_value_errors) = True;
486 emit( " <kind>UninitCondition</kind>\n" );
487 emit( " <what>Conditional jump or move depends"
488 " on uninitialised value(s)</what>\n" );
489 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
490 if (extra->Err.Cond.origin_ec)
491 mc_pp_origin( extra->Err.Cond.origin_ec,
492 extra->Err.Cond.otag & 3 );
494 /* Could also show extra->Err.Cond.otag if debugging origin
496 emit( "Conditional jump or move depends"
497 " on uninitialised value(s)\n" );
498 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
499 if (extra->Err.Cond.origin_ec)
500 mc_pp_origin( extra->Err.Cond.origin_ec,
501 extra->Err.Cond.otag & 3 );
/* Err_RegParam: syscall register param with undefined bytes. */
506 MC_(any_value_errors) = True;
508 emit( " <kind>SyscallParam</kind>\n" );
509 emiN( " <what>Syscall param %t contains "
510 "uninitialised byte(s)</what>\n",
511 VG_(get_error_string)(err) );
512 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
513 if (extra->Err.RegParam.origin_ec)
514 mc_pp_origin( extra->Err.RegParam.origin_ec,
515 extra->Err.RegParam.otag & 3 );
517 emit( "Syscall param %s contains uninitialised byte(s)\n",
518 VG_(get_error_string)(err) );
519 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
520 if (extra->Err.RegParam.origin_ec)
521 mc_pp_origin( extra->Err.RegParam.origin_ec,
522 extra->Err.RegParam.otag & 3 );
/* Err_MemParam: syscall memory param -- addressability OR
   definedness error; origin printed only for the latter. */
527 if (!extra->Err.MemParam.isAddrErr)
528 MC_(any_value_errors) = True;
530 emit( " <kind>SyscallParam</kind>\n" );
531 emiN( " <what>Syscall param %t points to %s byte(s)</what>\n",
532 VG_(get_error_string)(err),
533 extra->Err.MemParam.isAddrErr
534 ? "unaddressable" : "uninitialised" );
535 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
536 mc_pp_AddrInfo(VG_(get_error_address)(err),
537 &extra->Err.MemParam.ai, False);
538 if (extra->Err.MemParam.origin_ec
539 && !extra->Err.MemParam.isAddrErr)
540 mc_pp_origin( extra->Err.MemParam.origin_ec,
541 extra->Err.MemParam.otag & 3 );
543 emit( "Syscall param %s points to %s byte(s)\n",
544 VG_(get_error_string)(err),
545 extra->Err.MemParam.isAddrErr
546 ? "unaddressable" : "uninitialised" );
547 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
548 mc_pp_AddrInfo(VG_(get_error_address)(err),
549 &extra->Err.MemParam.ai, False);
550 if (extra->Err.MemParam.origin_ec
551 && !extra->Err.MemParam.isAddrErr)
552 mc_pp_origin( extra->Err.MemParam.origin_ec,
553 extra->Err.MemParam.otag & 3 );
/* Err_User: failed client check request (addr or definedness). */
558 if (!extra->Err.User.isAddrErr)
559 MC_(any_value_errors) = True;
561 emit( " <kind>ClientCheck</kind>\n" );
562 emit( " <what>%s byte(s) found "
563 "during client check request</what>\n",
564 extra->Err.User.isAddrErr
565 ? "Unaddressable" : "Uninitialised" );
566 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
567 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
569 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
570 mc_pp_origin( extra->Err.User.origin_ec,
571 extra->Err.User.otag & 3 );
573 emit( "%s byte(s) found during client check request\n",
574 extra->Err.User.isAddrErr
575 ? "Unaddressable" : "Uninitialised" );
576 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
577 mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
579 if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
580 mc_pp_origin( extra->Err.User.origin_ec,
581 extra->Err.User.otag & 3 );
/* Err_Free: free()/delete of a non-heap pointer (incl. double free). */
587 emit( " <kind>InvalidFree</kind>\n" );
588 emit( " <what>Invalid free() / delete / delete[]"
589 " / realloc()</what>\n" );
590 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
591 mc_pp_AddrInfo( VG_(get_error_address)(err),
592 &extra->Err.Free.ai, False );
594 emit( "Invalid free() / delete / delete[] / realloc()\n" );
595 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
596 mc_pp_AddrInfo( VG_(get_error_address)(err),
597 &extra->Err.Free.ai, False );
601 case Err_FreeMismatch:
603 emit( " <kind>MismatchedFree</kind>\n" );
604 emit( " <what>Mismatched free() / delete / delete []</what>\n" );
605 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
606 mc_pp_AddrInfo(VG_(get_error_address)(err),
607 &extra->Err.FreeMismatch.ai, False);
609 emit( "Mismatched free() / delete / delete []\n" );
610 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
611 mc_pp_AddrInfo(VG_(get_error_address)(err),
612 &extra->Err.FreeMismatch.ai, False);
/* Err_Addr: invalid read/write; maybe_gcc selects the below-%esp
   wording in mc_pp_AddrInfo. */
618 emit( " <kind>Invalid%s</kind>\n",
619 extra->Err.Addr.isWrite ? "Write" : "Read" );
620 emit( " <what>Invalid %s of size %ld</what>\n",
621 extra->Err.Addr.isWrite ? "write" : "read",
622 extra->Err.Addr.szB );
623 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
624 mc_pp_AddrInfo( VG_(get_error_address)(err),
626 extra->Err.Addr.maybe_gcc );
628 emit( "Invalid %s of size %ld\n",
629 extra->Err.Addr.isWrite ? "write" : "read",
630 extra->Err.Addr.szB );
631 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
633 mc_pp_AddrInfo( VG_(get_error_address)(err),
635 extra->Err.Addr.maybe_gcc );
/* Err_Jump: jump to an unaddressable location. */
641 emit( " <kind>InvalidJump</kind>\n" );
642 emit( " <what>Jump to the invalid address stated "
643 "on the next line</what>\n" );
644 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
645 mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
648 emit( "Jump to the invalid address stated on the next line\n" );
649 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
650 mc_pp_AddrInfo( VG_(get_error_address)(err), &extra->Err.Jump.ai,
/* Err_Overlap: overlapping src/dst in strcpy/memcpy etc; szB == 0
   means the routine's size argument is unused/unknown. */
657 emit( " <kind>Overlap</kind>\n" );
658 if (extra->Err.Overlap.szB == 0) {
659 emiN( " <what>Source and destination overlap "
660 "in %t(%#lx, %#lx)\n</what>\n",
661 VG_(get_error_string)(err),
662 extra->Err.Overlap.dst, extra->Err.Overlap.src );
664 emit( " <what>Source and destination overlap "
665 "in %s(%#lx, %#lx, %d)</what>\n",
666 VG_(get_error_string)(err),
667 extra->Err.Overlap.dst, extra->Err.Overlap.src,
668 extra->Err.Overlap.szB );
670 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
672 if (extra->Err.Overlap.szB == 0) {
673 emiN( "Source and destination overlap in %t(%#lx, %#lx)\n",
674 VG_(get_error_string)(err),
675 extra->Err.Overlap.dst, extra->Err.Overlap.src );
677 emit( "Source and destination overlap in %s(%#lx, %#lx, %d)\n",
678 VG_(get_error_string)(err),
679 extra->Err.Overlap.dst, extra->Err.Overlap.src,
680 extra->Err.Overlap.szB );
682 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
686 case Err_IllegalMempool:
687 // JRS 17 May 09: None of our regtests exercise this; hence AFAIK
688 // the following code is untested. Bad.
690 emit( " <kind>InvalidMemPool</kind>\n" );
691 emit( " <what>Illegal memory pool address</what>\n" );
692 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
693 mc_pp_AddrInfo( VG_(get_error_address)(err),
694 &extra->Err.IllegalMempool.ai, False );
696 emit( "Illegal memory pool address\n" );
697 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
698 mc_pp_AddrInfo( VG_(get_error_address)(err),
699 &extra->Err.IllegalMempool.ai, False );
/* Err_Leak: one loss record out of n_total_records; direct and
   indirect byte counts reported separately when indirect_szB > 0. */
704 UInt n_this_record = extra->Err.Leak.n_this_record;
705 UInt n_total_records = extra->Err.Leak.n_total_records;
706 LossRecord* lr = extra->Err.Leak.lr;
708 emit(" <kind>%s</kind>\n", xml_leak_kind(lr->key.state));
709 if (lr->indirect_szB > 0) {
710 emit( " <xwhat>\n" );
711 emit( " <text>%'lu (%'lu direct, %'lu indirect) bytes "
713 " are %s in loss record %'u of %'u</text>\n",
714 lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
716 str_leak_lossmode(lr->key.state),
717 n_this_record, n_total_records );
718 // Nb: don't put commas in these XML numbers
719 emit( " <leakedbytes>%lu</leakedbytes>\n",
720 lr->szB + lr->indirect_szB );
721 emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks );
722 emit( " </xwhat>\n" );
724 emit( " <xwhat>\n" );
725 emit( " <text>%'lu bytes in %'u blocks"
726 " are %s in loss record %'u of %'u</text>\n",
727 lr->szB, lr->num_blocks,
728 str_leak_lossmode(lr->key.state),
729 n_this_record, n_total_records );
730 emit( " <leakedbytes>%ld</leakedbytes>\n", lr->szB);
731 emit( " <leakedblocks>%d</leakedblocks>\n", lr->num_blocks);
732 emit( " </xwhat>\n" );
734 VG_(pp_ExeContext)(lr->key.allocated_at);
735 } else { /* ! if (xml) */
736 if (lr->indirect_szB > 0) {
738 "%'lu (%'lu direct, %'lu indirect) bytes in %'u blocks"
739 " are %s in loss record %'u of %'u\n",
740 lr->szB + lr->indirect_szB, lr->szB, lr->indirect_szB,
741 lr->num_blocks, str_leak_lossmode(lr->key.state),
742 n_this_record, n_total_records
746 "%'lu bytes in %'u blocks are %s in loss record %'u of %'u\n",
747 lr->szB, lr->num_blocks, str_leak_lossmode(lr->key.state),
748 n_this_record, n_total_records
751 VG_(pp_ExeContext)(lr->key.allocated_at);
/* Unknown kind: internal error. */
757 VG_(printf)("Error:\n unknown Memcheck error code %d\n",
758 VG_(get_error_kind)(err));
759 VG_(tool_panic)("unknown error code in mc_pp_Error)");
763 /*------------------------------------------------------------*/
764 /*--- Recording errors ---*/
765 /*------------------------------------------------------------*/
767 /* These many bytes below %ESP are considered addressible if we're
768 doing the --workaround-gcc296-bugs hack. */
769 #define VG_GCC296_BUG_STACK_SLOP 1024
771 /* Is this address within some small distance below %ESP? Used only
772 for the --workaround-gcc296-bugs kludge. */
773 static Bool is_just_below_ESP( Addr esp, Addr aa )
/* Discount the redzone first, so "below esp" means below the usable
   stack pointer, then accept anything within the slop window. */
775 esp -= VG_STACK_REDZONE_SZB;
776 if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
782 /* --- Called from generated and non-generated code --- */
/* Record an invalid read/write of 'szB' bytes at 'a' by thread 'tid'.
   May suppress the error entirely: for ignored ranges, for triggered
   watchpoints, on AIX for page-zero reads and glink-fragment reads,
   and (optionally) for accesses just below %ESP. */
784 void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
790 if (MC_(in_ignored_range)(a))
/* If a watchpoint covers this access, it is reported through the
   watchpoint machinery instead of as an address error. */
793 if (VG_(is_watched)( (isWrite ? write_watchpoint : read_watchpoint), a, szB))
796 # if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
797 /* AIX zero-page handling. On AIX, reads from page zero are,
798 bizarrely enough, legitimate. Writes to page zero aren't,
799 though. Since memcheck can't distinguish reads from writes, the
800 best we can do is to 'act normal' and mark the A bits in the
801 normal way as noaccess, but then hide any reads from that page
802 that get reported here. */
803 if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096)
806 /* Appalling AIX hack. It suppresses reads done by glink
807 fragments. Getting rid of this would require figuring out
808 somehow where the referenced data areas are (and their
810 if ((!isWrite) && szB == sizeof(Word)) {
812 UInt* pc = (UInt*)VG_(get_IP)(tid);
/* Match the two-instruction glink load sequence at or just before
   the current PC (32- vs 64-bit encodings differ). */
813 if (sizeof(Word) == 4) {
814 i1 = 0x800c0000; /* lwz r0,0(r12) */
815 i2 = 0x804c0004; /* lwz r2,4(r12) */
817 i1 = 0xe80c0000; /* ld r0,0(r12) */
818 i2 = 0xe84c0008; /* ld r2,8(r12) */
820 if (pc[0] == i1 && pc[1] == i2) return;
821 if (pc[0] == i2 && pc[-1] == i1) return;
825 just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );
827 /* If this is caused by an access immediately below %ESP, and the
828 user asks nicely, we just ignore it. */
829 if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
/* Otherwise, package the details and hand off to the core. The
   address info is classified lazily (Addr_Undescribed) -- it is
   filled in later by MC_(update_Error_extra). */
832 extra.Err.Addr.isWrite = isWrite;
833 extra.Err.Addr.szB = szB;
834 extra.Err.Addr.maybe_gcc = just_below_esp;
835 extra.Err.Addr.ai.tag = Addr_Undescribed;
836 VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
/* Record a use of an uninitialised value of 'szB' bytes.  'otag' is the
   origin tag (0 unless origin tracking, --track-origins, is active). */
839 void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
842 tl_assert( MC_(clo_mc_level) >= 2 );
/* A non-zero otag is only legal at mc_level 3 (origin tracking on). */
844 tl_assert( MC_(clo_mc_level) == 3 );
845 extra.Err.Value.szB = szB;
846 extra.Err.Value.otag = otag;
847 extra.Err.Value.origin_ec = NULL; /* Filled in later */
848 VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
/* Record a conditional jump/move depending on uninitialised value(s).
   'otag' as for MC_(record_value_error). */
851 void MC_(record_cond_error) ( ThreadId tid, UInt otag )
854 tl_assert( MC_(clo_mc_level) >= 2 );
856 tl_assert( MC_(clo_mc_level) == 3 );
857 extra.Err.Cond.otag = otag;
858 extra.Err.Cond.origin_ec = NULL; /* Filled in later */
859 VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
862 /* --- Called from non-generated code --- */
864 /* This is for memory errors in signal-related memory. */
/* 'msg' names the offending object; no extra payload is attached. */
865 void MC_(record_core_mem_error) ( ThreadId tid, Char* msg )
867 VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
/* Record a syscall register parameter containing undefined bytes.
   'msg' names the parameter; 'otag' is the origin tag. */
870 void MC_(record_regparam_error) ( ThreadId tid, Char* msg, UInt otag )
873 tl_assert(VG_INVALID_THREADID != tid);
875 tl_assert( MC_(clo_mc_level) == 3 );
876 extra.Err.RegParam.otag = otag;
877 extra.Err.RegParam.origin_ec = NULL; /* Filled in later */
878 VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
/* Record a syscall memory parameter pointing at unaddressable
   (isAddrErr) or undefined (!isAddrErr) bytes at address 'a'. */
881 void MC_(record_memparam_error) ( ThreadId tid, Addr a,
882 Bool isAddrErr, Char* msg, UInt otag )
885 tl_assert(VG_INVALID_THREADID != tid);
887 tl_assert( MC_(clo_mc_level) >= 2 );
/* Origin tags only accompany definedness errors at mc_level 3. */
889 tl_assert( MC_(clo_mc_level) == 3 );
890 tl_assert( !isAddrErr );
892 extra.Err.MemParam.isAddrErr = isAddrErr;
893 extra.Err.MemParam.ai.tag = Addr_Undescribed;
894 extra.Err.MemParam.otag = otag;
895 extra.Err.MemParam.origin_ec = NULL; /* Filled in later */
896 VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
/* Record a jump to the unaddressable location 'a'. */
899 void MC_(record_jump_error) ( ThreadId tid, Addr a )
902 tl_assert(VG_INVALID_THREADID != tid);
903 extra.Err.Jump.ai.tag = Addr_Undescribed;
904 VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &extra );
/* Record a free()/delete of 'a', which is not a live heap block
   (also covers double frees). */
907 void MC_(record_free_error) ( ThreadId tid, Addr a )
910 tl_assert(VG_INVALID_THREADID != tid);
911 extra.Err.Free.ai.tag = Addr_Undescribed;
912 VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &extra );
/* Record a mismatched alloc/dealloc pair (e.g. malloc + delete) on
   chunk 'mc'.  Unlike the other record_* functions, the AddrInfo is
   filled in here directly from the chunk, not classified lazily. */
915 void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
918 AddrInfo* ai = &extra.Err.FreeMismatch.ai;
919 tl_assert(VG_INVALID_THREADID != tid);
920 ai->tag = Addr_Block;
921 ai->Addr.Block.block_kind = Block_Mallocd; // Nb: Not 'Block_Freed'
922 ai->Addr.Block.block_desc = "block";
923 ai->Addr.Block.block_szB = mc->szB;
924 ai->Addr.Block.rwoffset = 0;
925 ai->Addr.Block.lastchange = mc->where;
926 VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
/* Record an operation on an illegal memory-pool address 'a'. */
930 void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
933 tl_assert(VG_INVALID_THREADID != tid);
934 extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
935 VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &extra );
/* Record overlapping src/dst blocks passed to 'function' (strcpy,
   memcpy, ...).  szB == 0 means the size argument is unused/unknown. */
938 void MC_(record_overlap_error) ( ThreadId tid, Char* function,
939 Addr src, Addr dst, SizeT szB )
942 tl_assert(VG_INVALID_THREADID != tid);
943 extra.Err.Overlap.src = src;
944 extra.Err.Overlap.dst = dst;
945 extra.Err.Overlap.szB = szB;
946 VG_(maybe_record_error)(
947 tid, Err_Overlap, /*addr*/0, /*s*/function, &extra );
/* Record one leak loss record (number 'n_this_record' of
   'n_total_records').  Goes through VG_(unique_error) rather than
   VG_(maybe_record_error): leaks are already deduplicated into loss
   records, so no further duplicate-matching is wanted. */
950 Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
951 UInt n_total_records, LossRecord* lr,
952 Bool print_record, Bool count_error )
955 extra.Err.Leak.n_this_record = n_this_record;
956 extra.Err.Leak.n_total_records = n_total_records;
957 extra.Err.Leak.lr = lr;
959 VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &extra,
960 lr->key.allocated_at, print_record,
961 /*allow_GDB_attach*/False, count_error );
/* Record a failed client check request (CHECK_MEM_IS_ADDRESSABLE /
   _IS_DEFINED) at address 'a'. */
964 void MC_(record_user_error) ( ThreadId tid, Addr a,
965 Bool isAddrErr, UInt otag )
/* A non-zero otag implies a definedness check under origin tracking. */
969 tl_assert(!isAddrErr);
970 tl_assert( MC_(clo_mc_level) == 3 );
973 tl_assert( MC_(clo_mc_level) >= 2 );
975 tl_assert(VG_INVALID_THREADID != tid);
976 extra.Err.User.isAddrErr = isAddrErr;
977 extra.Err.User.ai.tag = Addr_Undescribed;
978 extra.Err.User.otag = otag;
979 extra.Err.User.origin_ec = NULL; /* Filled in later */
980 VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
983 /*------------------------------------------------------------*/
984 /*--- Other error operations ---*/
985 /*------------------------------------------------------------*/
987 /* Compare error contexts, to detect duplicates. Note that if they
988 are otherwise the same, the faulting addrs and associated rwoffsets
989 are allowed to be different. */
990 Bool MC_(eq_Error) ( VgRes res, Error* e1, Error* e2 )
992 MC_Error* extra1 = VG_(get_error_extra)(e1);
993 MC_Error* extra2 = VG_(get_error_extra)(e2);
995 /* Guaranteed by calling function */
996 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
998 switch (VG_(get_error_kind)(e1)) {
/* Fast path: pointer equality of the strings before strcmp. */
1001 e1s = VG_(get_error_string)(e1);
1002 e2s = VG_(get_error_string)(e2);
1003 if (e1s == e2s) return True;
1004 if (VG_STREQ(e1s, e2s)) return True;
/* Param-style errors compare equal iff their names match. */
1009 return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
1011 // Perhaps we should also check the addrinfo.akinds for equality.
1012 // That would result in more error reports, but only in cases where
1013 // a register contains uninitialised bytes and points to memory
1014 // containing uninitialised bytes. Currently, the 2nd of those to be
1015 // detected won't be reported. That is (nearly?) always the memory
1016 // error, which is good.
1018 if (!VG_STREQ(VG_(get_error_string)(e1),
1019 VG_(get_error_string)(e2))) return False;
1022 return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
/* These kinds have no distinguishing payload beyond kind+context. */
1026 case Err_FreeMismatch:
1028 case Err_IllegalMempool:
/* Addr/Value errors additionally require matching access size. */
1034 return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
1038 return ( extra1->Err.Value.szB == extra2->Err.Value.szB
1042 VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
1043 "since it's handled with VG_(unique_error)()!");
1046 VG_(printf)("Error:\n unknown error code %d\n",
1047 VG_(get_error_kind)(e1));
1048 VG_(tool_panic)("unknown error code in mc_eq_Error");
1052 /* Functions used when searching MC_Chunk lists */
/* True iff 'a' lies within chunk 'mc', extended by the default
   malloc redzone on each side. */
1054 Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
1056 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1057 MC_MALLOC_REDZONE_SZB );
/* Same, but with a caller-supplied redzone size 'rzB' (used for
   mempools, whose redzones differ from malloc's). */
1060 Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
1062 return VG_(addr_is_in_block)( a, mc->data, mc->szB,
1066 // Forward declarations
1067 static Bool client_block_maybe_describe( Addr a, AddrInfo* ai );
1068 static Bool mempool_block_maybe_describe( Addr a, AddrInfo* ai );
1071 /* Describe an address as best you can, for error messages,
1072 putting the result in ai. */
/* Classification is tried in a fixed priority order: client block,
   mempool block, freed block, malloc'd block, debuginfo variable,
   data symbol, thread stack, section kind, and finally Addr_Unknown. */
1073 static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
1077 Addr stack_min, stack_max;
1080 tl_assert(Addr_Undescribed == ai->tag);
1082 /* -- Perhaps it's a user-named block? -- */
1083 if (client_block_maybe_describe( a, ai )) {
1086 /* -- Perhaps it's in mempool block? -- */
1087 if (mempool_block_maybe_describe( a, ai )) {
1090 /* -- Search for a recently freed block which might bracket it. -- */
1091 mc = MC_(get_freed_list_head)();
1093 if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
1094 ai->tag = Addr_Block;
1095 ai->Addr.Block.block_kind = Block_Freed;
1096 ai->Addr.Block.block_desc = "block";
1097 ai->Addr.Block.block_szB = mc->szB;
/* rwoffset may be negative (just before the block) -- hence the
   signed Word arithmetic. */
1098 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1099 ai->Addr.Block.lastchange = mc->where;
1104 /* -- Search for a currently malloc'd block which might bracket it. -- */
1105 VG_(HT_ResetIter)(MC_(malloc_list));
1106 while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
1107 if (addr_is_in_MC_Chunk_default_REDZONE_SZB(mc, a)) {
1108 ai->tag = Addr_Block;
1109 ai->Addr.Block.block_kind = Block_Mallocd;
1110 ai->Addr.Block.block_desc = "block";
1111 ai->Addr.Block.block_szB = mc->szB;
1112 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1113 ai->Addr.Block.lastchange = mc->where;
1117 /* -- Perhaps the variable type/location data describes it? -- */
1118 ai->Addr.Variable.descr1
1119 = VG_(newXA)( VG_(malloc), "mc.da.descr1",
1120 VG_(free), sizeof(HChar) );
1121 ai->Addr.Variable.descr2
1122 = VG_(newXA)( VG_(malloc), "mc.da.descr2",
1123 VG_(free), sizeof(HChar) );
1125 (void) VG_(get_data_description)( ai->Addr.Variable.descr1,
1126 ai->Addr.Variable.descr2, a );
1127 /* If there's nothing in descr1/2, free them. Why is it safe to to
1128 VG_(indexXA) at zero here? Because VG_(get_data_description)
1129 guarantees to zero terminate descr1/2 regardless of the outcome
1130 of the call. So there's always at least one element in each XA
1133 if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr1, 0 ))) {
1134 VG_(deleteXA)( ai->Addr.Variable.descr1 );
1135 ai->Addr.Variable.descr1 = NULL;
1137 if (0 == VG_(strlen)( VG_(indexXA)( ai->Addr.Variable.descr2, 0 ))) {
1138 VG_(deleteXA)( ai->Addr.Variable.descr2 );
1139 ai->Addr.Variable.descr2 = NULL;
1141 /* Assume (assert) that VG_(get_data_description) fills in descr1
1142 before it fills in descr2 */
1143 if (ai->Addr.Variable.descr1 == NULL)
1144 tl_assert(ai->Addr.Variable.descr2 == NULL);
1145 /* So did we get lucky? */
1146 if (ai->Addr.Variable.descr1 != NULL) {
1147 ai->tag = Addr_Variable;
1150 /* -- Have a look at the low level data symbols - perhaps it's in
/* Pre-zero the fixed-size name buffer so the result is always
   NUL-terminated (asserted below). */
1152 VG_(memset)( &ai->Addr.DataSym.name,
1153 0, sizeof(ai->Addr.DataSym.name));
1154 if (VG_(get_datasym_and_offset)(
1155 a, &ai->Addr.DataSym.name[0],
1156 sizeof(ai->Addr.DataSym.name)-1,
1157 &ai->Addr.DataSym.offset )) {
1158 ai->tag = Addr_DataSym;
1159 tl_assert( ai->Addr.DataSym.name
1160 [ sizeof(ai->Addr.DataSym.name)-1 ] == 0);
1163 /* -- Perhaps it's on a thread's stack? -- */
1164 VG_(thread_stack_reset_iter)(&tid);
1165 while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
/* Include the redzone below stack_min in the match. */
1166 if (stack_min - VG_STACK_REDZONE_SZB <= a && a <= stack_max) {
1167 ai->tag = Addr_Stack;
1168 ai->Addr.Stack.tid = tid;
1172 /* -- last ditch attempt at classification -- */
1173 tl_assert( sizeof(ai->Addr.SectKind.objname) > 4 );
1174 VG_(memset)( &ai->Addr.SectKind.objname,
1175 0, sizeof(ai->Addr.SectKind.objname));
1176 VG_(strcpy)( ai->Addr.SectKind.objname, "???" );
1177 sect = VG_(DebugInfo_sect_kind)( &ai->Addr.SectKind.objname[0],
1178 sizeof(ai->Addr.SectKind.objname)-1, a);
1179 if (sect != Vg_SectUnknown) {
1180 ai->tag = Addr_SectKind;
1181 ai->Addr.SectKind.kind = sect;
1182 tl_assert( ai->Addr.SectKind.objname
1183 [ sizeof(ai->Addr.SectKind.objname)-1 ] == 0);
1186 /* -- Clueless ... -- */
1187 ai->tag = Addr_Unknown;
/* Classify and print a description of address 'a' -- convenience
   wrapper over describe_addr + mc_pp_AddrInfo (used e.g. by the
   gdbserver monitor commands). */
1191 void MC_(pp_describe_addr) ( Addr a )
1195 ai.tag = Addr_Undescribed;
1196 describe_addr (a, &ai);
1197 mc_pp_AddrInfo (a, &ai, /* maybe_gcc */ False);
1200 /* Fill in *origin_ec as specified by otag, or NULL it out if otag
1201 does not refer to a known origin. */
1202 static void update_origin ( /*OUT*/ExeContext** origin_ec,
1205 UInt ecu = otag & ~3;
1207 if (VG_(is_plausible_ECU)(ecu)) {
1208 *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
1212 /* Updates the copy with address info if necessary (but not for all errors). */
1213 UInt MC_(update_Error_extra)( Error* err )
1215 MC_Error* extra = VG_(get_error_extra)(err);
1217 switch (VG_(get_error_kind)(err)) {
1218 // These ones don't have addresses associated with them, and so don't
1219 // need any updating.
1224 // For Err_Leaks the returned size does not matter -- they are always
1225 // shown with VG_(unique_error)() so they 'extra' not copied. But
1226 // we make it consistent with the others.
1228 return sizeof(MC_Error);
1230 // For value errors, get the ExeContext corresponding to the
1231 // origin tag. Note that it is a kludge to assume that
1232 // a length-1 trace indicates a stack origin. FIXME.
1234 update_origin( &extra->Err.Value.origin_ec,
1235 extra->Err.Value.otag );
1236 return sizeof(MC_Error);
1238 update_origin( &extra->Err.Cond.origin_ec,
1239 extra->Err.Cond.otag );
1240 return sizeof(MC_Error);
1242 update_origin( &extra->Err.RegParam.origin_ec,
1243 extra->Err.RegParam.otag );
1244 return sizeof(MC_Error);
1246 // These ones always involve a memory address.
1248 describe_addr ( VG_(get_error_address)(err),
1249 &extra->Err.Addr.ai );
1250 return sizeof(MC_Error);
1252 describe_addr ( VG_(get_error_address)(err),
1253 &extra->Err.MemParam.ai );
1254 update_origin( &extra->Err.MemParam.origin_ec,
1255 extra->Err.MemParam.otag );
1256 return sizeof(MC_Error);
1258 describe_addr ( VG_(get_error_address)(err),
1259 &extra->Err.Jump.ai );
1260 return sizeof(MC_Error);
1262 describe_addr ( VG_(get_error_address)(err),
1263 &extra->Err.User.ai );
1264 update_origin( &extra->Err.User.origin_ec,
1265 extra->Err.User.otag );
1266 return sizeof(MC_Error);
1268 describe_addr ( VG_(get_error_address)(err),
1269 &extra->Err.Free.ai );
1270 return sizeof(MC_Error);
1271 case Err_IllegalMempool:
1272 describe_addr ( VG_(get_error_address)(err),
1273 &extra->Err.IllegalMempool.ai );
1274 return sizeof(MC_Error);
1276 // Err_FreeMismatches have already had their address described; this is
1277 // possible because we have the MC_Chunk on hand when the error is
1278 // detected. However, the address may be part of a user block, and if so
1279 // we override the pre-determined description with a user block one.
1280 case Err_FreeMismatch: {
1281 tl_assert(extra && Block_Mallocd ==
1282 extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
1283 (void)client_block_maybe_describe( VG_(get_error_address)(err),
1284 &extra->Err.FreeMismatch.ai );
1285 return sizeof(MC_Error);
1288 default: VG_(tool_panic)("mc_update_extra: bad errkind");
1293 static Bool client_block_maybe_describe( Addr a,
1294 /*OUT*/AddrInfo* ai )
1297 CGenBlock* cgbs = NULL;
1300 MC_(get_ClientBlock_array)( &cgbs, &cgb_used );
1302 tl_assert(cgb_used == 0);
1304 /* Perhaps it's a general block ? */
1305 for (i = 0; i < cgb_used; i++) {
1306 if (cgbs[i].start == 0 && cgbs[i].size == 0)
1308 // Use zero as the redzone for client blocks.
1309 if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
1310 ai->tag = Addr_Block;
1311 ai->Addr.Block.block_kind = Block_UserG;
1312 ai->Addr.Block.block_desc = cgbs[i].desc;
1313 ai->Addr.Block.block_szB = cgbs[i].size;
1314 ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
1315 ai->Addr.Block.lastchange = cgbs[i].where;
1323 static Bool mempool_block_maybe_describe( Addr a,
1324 /*OUT*/AddrInfo* ai )
1327 tl_assert( MC_(mempool_list) );
1329 VG_(HT_ResetIter)( MC_(mempool_list) );
1330 while ( (mp = VG_(HT_Next)(MC_(mempool_list))) ) {
1331 if (mp->chunks != NULL) {
1333 VG_(HT_ResetIter)(mp->chunks);
1334 while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
1335 if (addr_is_in_MC_Chunk_with_REDZONE_SZB(mc, a, mp->rzB)) {
1336 ai->tag = Addr_Block;
1337 ai->Addr.Block.block_kind = Block_MempoolChunk;
1338 ai->Addr.Block.block_desc = "block";
1339 ai->Addr.Block.block_szB = mc->szB;
1340 ai->Addr.Block.rwoffset = (Word)a - (Word)mc->data;
1341 ai->Addr.Block.lastchange = mc->where;
1351 /*------------------------------------------------------------*/
1352 /*--- Suppressions ---*/
1353 /*------------------------------------------------------------*/
/* The kinds of suppression MemCheck recognises in suppression files. */
typedef
   enum {
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   }
   MC_SuppKind;
1378 Bool MC_(is_recognised_suppression) ( Char* name, Supp* su )
1382 if (VG_STREQ(name, "Param")) skind = ParamSupp;
1383 else if (VG_STREQ(name, "User")) skind = UserSupp;
1384 else if (VG_STREQ(name, "CoreMem")) skind = CoreMemSupp;
1385 else if (VG_STREQ(name, "Addr1")) skind = Addr1Supp;
1386 else if (VG_STREQ(name, "Addr2")) skind = Addr2Supp;
1387 else if (VG_STREQ(name, "Addr4")) skind = Addr4Supp;
1388 else if (VG_STREQ(name, "Addr8")) skind = Addr8Supp;
1389 else if (VG_STREQ(name, "Addr16")) skind = Addr16Supp;
1390 else if (VG_STREQ(name, "Jump")) skind = JumpSupp;
1391 else if (VG_STREQ(name, "Free")) skind = FreeSupp;
1392 else if (VG_STREQ(name, "Leak")) skind = LeakSupp;
1393 else if (VG_STREQ(name, "Overlap")) skind = OverlapSupp;
1394 else if (VG_STREQ(name, "Mempool")) skind = MempoolSupp;
1395 else if (VG_STREQ(name, "Cond")) skind = CondSupp;
1396 else if (VG_STREQ(name, "Value0")) skind = CondSupp; /* backwards compat */
1397 else if (VG_STREQ(name, "Value1")) skind = Value1Supp;
1398 else if (VG_STREQ(name, "Value2")) skind = Value2Supp;
1399 else if (VG_STREQ(name, "Value4")) skind = Value4Supp;
1400 else if (VG_STREQ(name, "Value8")) skind = Value8Supp;
1401 else if (VG_STREQ(name, "Value16")) skind = Value16Supp;
1405 VG_(set_supp_kind)(su, skind);
1409 Bool MC_(read_extra_suppression_info) ( Int fd, Char** bufpp,
1410 SizeT* nBufp, Supp *su )
1414 if (VG_(get_supp_kind)(su) == ParamSupp) {
1415 eof = VG_(get_line) ( fd, bufpp, nBufp, NULL );
1416 if (eof) return False;
1417 VG_(set_supp_string)(su, VG_(strdup)("mc.resi.1", *bufpp));
1422 Bool MC_(error_matches_suppression) ( Error* err, Supp* su )
1425 MC_Error* extra = VG_(get_error_extra)(err);
1426 ErrorKind ekind = VG_(get_error_kind )(err);
1428 switch (VG_(get_supp_kind)(su)) {
1430 return ((ekind == Err_RegParam || ekind == Err_MemParam)
1431 && VG_STREQ(VG_(get_error_string)(err),
1432 VG_(get_supp_string)(su)));
1435 return (ekind == Err_User);
1438 return (ekind == Err_CoreMem
1439 && VG_STREQ(VG_(get_error_string)(err),
1440 VG_(get_supp_string)(su)));
1442 case Value1Supp: su_szB = 1; goto value_case;
1443 case Value2Supp: su_szB = 2; goto value_case;
1444 case Value4Supp: su_szB = 4; goto value_case;
1445 case Value8Supp: su_szB = 8; goto value_case;
1446 case Value16Supp:su_szB =16; goto value_case;
1448 return (ekind == Err_Value && extra->Err.Value.szB == su_szB);
1451 return (ekind == Err_Cond);
1453 case Addr1Supp: su_szB = 1; goto addr_case;
1454 case Addr2Supp: su_szB = 2; goto addr_case;
1455 case Addr4Supp: su_szB = 4; goto addr_case;
1456 case Addr8Supp: su_szB = 8; goto addr_case;
1457 case Addr16Supp:su_szB =16; goto addr_case;
1459 return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);
1462 return (ekind == Err_Jump);
1465 return (ekind == Err_Free || ekind == Err_FreeMismatch);
1468 return (ekind == Err_Overlap);
1471 return (ekind == Err_Leak);
1474 return (ekind == Err_IllegalMempool);
1477 VG_(printf)("Error:\n"
1478 " unknown suppression type %d\n",
1479 VG_(get_supp_kind)(su));
1480 VG_(tool_panic)("unknown suppression type in "
1481 "MC_(error_matches_suppression)");
1485 Char* MC_(get_error_name) ( Error* err )
1487 switch (VG_(get_error_kind)(err)) {
1488 case Err_RegParam: return "Param";
1489 case Err_MemParam: return "Param";
1490 case Err_User: return "User";
1491 case Err_FreeMismatch: return "Free";
1492 case Err_IllegalMempool: return "Mempool";
1493 case Err_Free: return "Free";
1494 case Err_Jump: return "Jump";
1495 case Err_CoreMem: return "CoreMem";
1496 case Err_Overlap: return "Overlap";
1497 case Err_Leak: return "Leak";
1498 case Err_Cond: return "Cond";
1500 MC_Error* extra = VG_(get_error_extra)(err);
1501 switch ( extra->Err.Addr.szB ) {
1502 case 1: return "Addr1";
1503 case 2: return "Addr2";
1504 case 4: return "Addr4";
1505 case 8: return "Addr8";
1506 case 16: return "Addr16";
1507 default: VG_(tool_panic)("unexpected size for Addr");
1511 MC_Error* extra = VG_(get_error_extra)(err);
1512 switch ( extra->Err.Value.szB ) {
1513 case 1: return "Value1";
1514 case 2: return "Value2";
1515 case 4: return "Value4";
1516 case 8: return "Value8";
1517 case 16: return "Value16";
1518 default: VG_(tool_panic)("unexpected size for Value");
1521 default: VG_(tool_panic)("get_error_name: unexpected type");
1525 Bool MC_(get_extra_suppression_info) ( Error* err,
1526 /*OUT*/Char* buf, Int nBuf )
1528 ErrorKind ekind = VG_(get_error_kind )(err);
1530 tl_assert(nBuf >= 16); // stay sane
1531 if (Err_RegParam == ekind || Err_MemParam == ekind) {
1532 Char* errstr = VG_(get_error_string)(err);
1534 VG_(snprintf)(buf, nBuf-1, "%s", errstr);
1542 /*--------------------------------------------------------------------*/
1543 /*--- end mc_errors.c ---*/
1544 /*--------------------------------------------------------------------*/