2 /*--------------------------------------------------------------------*/
3 /*--- Error management for Helgrind. ---*/
4 /*--- hg_errors.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Helgrind, a Valgrind tool for detecting errors
11 Copyright (C) 2007-2010 OpenWorks Ltd
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_tool_basics.h"
33 #include "pub_tool_libcbase.h"
34 #include "pub_tool_libcassert.h"
35 #include "pub_tool_libcprint.h"
36 #include "pub_tool_execontext.h"
37 #include "pub_tool_errormgr.h"
38 #include "pub_tool_wordfm.h"
39 #include "pub_tool_xarray.h"
40 #include "pub_tool_debuginfo.h"
41 #include "pub_tool_threadstate.h"
42 #include "pub_tool_options.h" // VG_(clo_xml)
44 #include "hg_basics.h"
45 #include "hg_wordset.h"
46 #include "hg_lock_n_thread.h"
48 #include "hg_errors.h" /* self */
51 /*----------------------------------------------------------------*/
52 /*--- Error management -- storage ---*/
53 /*----------------------------------------------------------------*/
55 /* maps (by value) strings to a copy of them in ARENA_TOOL */
/* The string intern table: a WordFM mapping each distinct string to a
   tool-arena copy of itself (see string_table_strdup below).  Created
   lazily on first use. */
57 static WordFM* string_table = NULL;
59 ULong HG_(stats__string_table_queries) = 0;
/* Stats accessor: number of entries currently interned (0 before the
   table is created).  NOTE(review): the closing brace of this function
   is not visible in this extract — interior lines appear to be missing
   throughout this file. */
61 ULong HG_(stats__string_table_get_map_size) ( void ) {
62 return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
/* Ordering function for string_table: keys are HChar* strings, compared
   by content via VG_(strcmp).  NOTE(review): closing brace missing from
   this extract. */
65 static Word string_table_cmp ( UWord s1, UWord s2 ) {
66 return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
69 static HChar* string_table_strdup ( HChar* str ) {
71 HG_(stats__string_table_queries)++;
75 string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
76 HG_(free), string_table_cmp );
77 tl_assert(string_table);
79 if (VG_(lookupFM)( string_table,
80 NULL, (Word*)©, (Word)str )) {
82 if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
85 copy = HG_(strdup)("hg.sts.2", str);
87 VG_(addToFM)( string_table, (Word)copy, (Word)copy );
92 /* maps from Lock .unique fields to LockP*s */
/* Map from LockN (the live, client-side lock record, keyed by its
   .unique field) to a persistent LockP copy used in stored errors.
   Created lazily in mk_LockP_from_LockN. */
94 static WordFM* map_LockN_to_P = NULL;
96 ULong HG_(stats__LockN_to_P_queries) = 0;
/* Stats accessor: number of LockN->LockP mappings (0 before creation).
   NOTE(review): closing brace missing from this extract. */
98 ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
99 return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
/* Ordering function for map_LockN_to_P: compares locks by their .unique
   field only, so a LockN and its LockP copy compare equal.
   NOTE(review): the opening brace, the final `return 0;` for the equal
   case, and the closing brace are missing from this extract. */
102 static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
104 Lock* lk1 = (Lock*)lk1W;
105 Lock* lk2 = (Lock*)lk2W;
106 tl_assert( HG_(is_sane_LockNorP)(lk1) );
107 tl_assert( HG_(is_sane_LockNorP)(lk2) );
108 if (lk1->unique < lk2->unique) return -1;
109 if (lk1->unique > lk2->unique) return 1;
/* Return the persistent LockP twin of live lock 'lkn', creating and
   caching it in map_LockN_to_P on first request.  The copy deliberately
   drops per-instant state (holders bag, acquired_at, libhb object) so it
   stays valid after the original lock dies.
   NOTE(review): this extract is missing the declaration of 'lkp', the
   struct copy into it, and the final `return lkp;` / closing brace. */
113 static Lock* mk_LockP_from_LockN ( Lock* lkn )
116 HG_(stats__LockN_to_P_queries)++;
117 tl_assert( HG_(is_sane_LockN)(lkn) );
118 if (!map_LockN_to_P) {
119 map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
120 HG_(free), lock_unique_cmp );
121 tl_assert(map_LockN_to_P);
123 if (!VG_(lookupFM)( map_LockN_to_P, NULL, (Word*)&lkp, (Word)lkn)) {
124 lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
127 lkp->magic = LockP_MAGIC;
128 /* Forget about the bag of lock holders - don't copy that.
129 Also, acquired_at should be NULL whenever heldBy is, and vice
130 versa. Also forget about the associated libhb synch object. */
133 lkp->acquired_at = NULL;
135 VG_(addToFM)( map_LockN_to_P, (Word)lkp, (Word)lkp );
137 tl_assert( HG_(is_sane_LockP)(lkp) );
143 race: program counter
149 FIXME: how does state printing interact with lockset gc?
150 Are the locksets in prev/curr state always valid?
151 Ditto question for the threadsets
152 ThreadSets - probably are always valid if Threads
153 are never thrown away.
154 LockSets - could at least print the lockset elements that
155 correspond to actual locks at the time of printing. Hmm.
/* Error-kind tags for Helgrind's XError.  NOTE(review): the enclosing
   `typedef enum { ... } XErrorTag;` wrapper is missing from this
   extract; only the enumerators are visible. */
161 XE_Race=1101, // race
162 XE_UnlockUnlocked, // unlocking a not-locked lock
163 XE_UnlockForeign, // unlocking a lock held by some other thread
164 XE_UnlockBogus, // unlocking an address not known to be a lock
165 XE_PthAPIerror, // error from the POSIX pthreads API
166 XE_LockOrder, // lock order error
167 XE_Misc // misc other error (w/ string to describe it)
/* Per-kind payload of an XError (the 'extra' attached to each recorded
   error).  NOTE(review): the enclosing `typedef struct { ... } XError;`
   wrapper, the union declaration, and several field declarations
   (data_addr/szB/isWrite, hctxt/haddr/hszB, h1_ct_*, lock_ga, etc.) are
   missing from this extract; only fragments of each union member are
   visible below. */
171 /* Extra contexts for kinds */
181 /* descr1/2 provide a description of stack/global locs */
182 XArray* descr1; /* XArray* of HChar */
183 XArray* descr2; /* XArray* of HChar */
184 /* halloc/haddr/hszB describe the addr if it is a heap block. */
188 /* h1_* and h2_* provide some description of a previously
189 observed access with which we are conflicting. */
190 Thread* h1_ct; /* non-NULL means h1 info present */
191 ExeContext* h1_ct_mbsegstartEC;
192 ExeContext* h1_ct_mbsegendEC;
193 Thread* h2_ct; /* non-NULL means h2 info present */
194 ExeContext* h2_ct_accEC;
/* -- UnlockUnlocked -- */
199 Thread* thr; /* doing the unlocking */
200 Lock* lock; /* lock (that is already unlocked) */
/* -- UnlockForeign -- */
203 Thread* thr; /* doing the unlocking */
204 Thread* owner; /* thread that actually holds the lock */
205 Lock* lock; /* lock (that is held by 'owner') */
/* -- UnlockBogus -- */
208 Thread* thr; /* doing the unlocking */
209 Addr lock_ga; /* purported address of the lock */
/* -- PthAPIerror -- */
213 HChar* fnname; /* persistent, in tool-arena */
214 Word err; /* pth error code */
215 HChar* errstr; /* persistent, in tool-arena */
/* -- LockOrder -- */
219 Addr before_ga; /* always locked first in prog. history */
221 ExeContext* before_ec;
222 ExeContext* after_ec;
/* -- Misc -- */
226 HChar* errstr; /* persistent, in tool-arena */
/* Zero an XError and poison the tag so uninitialised use is caught by
   later tag checks.  NOTE(review): closing brace missing from this
   extract. */
232 static void init_XError ( XError* xe ) {
233 VG_(memset)(xe, 0, sizeof(*xe) );
234 xe->tag = XE_Race-1; /* bogus */
/* Suppression-kind tags (XS_*), mirroring the XE_* error kinds.
   NOTE(review): the enclosing typedef/enum wrapper and the remaining
   enumerators (XS_FreeMemLock .. XS_Misc, used below in
   HG_(recognised_suppression)) are missing from this extract. */
238 /* Extensions of suppressions */
241 XS_Race=1201, /* race */
253 /* Updates the copy with address info if necessary. */
/* Error-manager callback: enrich a just-about-to-be-stored error with
   expensive-to-compute address/context info.  Only XE_Race errors get
   extra work; returns the size of the extra block in all cases.
   NOTE(review): several interior lines (declarations of is_heapblock,
   thrp/threadp, conf_szB; closing braces; the static query counter this
   printf reports) are missing from this extract. */
254 UInt HG_(update_extra) ( Error* err )
256 XError* xe = (XError*)VG_(get_error_extra)(err);
258 //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
259 // describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
262 if (xe->tag == XE_Race) {
264 /* See if we can come up with a source level description of the
265 raced-upon address. This is potentially expensive, which is
266 why it's only done at the update_extra point, not when the
267 error is initially created. */
271 VG_(printf)("HG_(update_extra): "
272 "%d conflicting-event queries\n", xxx);
274 tl_assert(!xe->XE.Race.hctxt);
275 tl_assert(!xe->XE.Race.descr1);
276 tl_assert(!xe->XE.Race.descr2);
278 /* First, see if it's in any heap block. Unfortunately this
279 means a linear search through all allocated heap blocks. The
280 assertion says that if it's detected as a heap block, then we
281 must have an allocation context for it, since all heap blocks
282 should have an allocation context. */
284 = HG_(mm_find_containing_block)(
285 &xe->XE.Race.hctxt, &xe->XE.Race.haddr, &xe->XE.Race.hszB,
286 xe->XE.Race.data_addr
288 tl_assert(is_heapblock == (xe->XE.Race.hctxt != NULL));
290 if (!xe->XE.Race.hctxt) {
291 /* It's not in any heap block. See if we can map it to a
292 stack or global symbol. */
295 = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr1",
296 HG_(free), sizeof(HChar) );
298 = VG_(newXA)( HG_(zalloc), "hg.update_extra.Race.descr2",
299 HG_(free), sizeof(HChar) );
301 (void) VG_(get_data_description)( xe->XE.Race.descr1,
303 xe->XE.Race.data_addr );
305 /* If there's nothing in descr1/2, free it. Why is it safe to
306 to VG_(indexXA) at zero here? Because
307 VG_(get_data_description) guarantees to zero terminate
308 descr1/2 regardless of the outcome of the call. So there's
309 always at least one element in each XA after the call.
311 if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr1, 0 ))) {
312 VG_(deleteXA)( xe->XE.Race.descr1 );
313 xe->XE.Race.descr1 = NULL;
315 if (0 == VG_(strlen)( VG_(indexXA)( xe->XE.Race.descr2, 0 ))) {
316 VG_(deleteXA)( xe->XE.Race.descr2 );
317 xe->XE.Race.descr2 = NULL;
321 /* And poke around in the conflicting-event map, to see if we
322 can rustle up a plausible-looking conflicting memory access
324 if (HG_(clo_history_level) >= 2) {
326 ExeContext* wherep = NULL;
327 Addr acc_addr = xe->XE.Race.data_addr;
328 Int acc_szB = xe->XE.Race.szB;
329 Thr* acc_thr = xe->XE.Race.thr->hbthr;
330 Bool acc_isW = xe->XE.Race.isWrite;
332 Bool conf_isW = False;
333 tl_assert(!xe->XE.Race.h2_ct_accEC);
334 tl_assert(!xe->XE.Race.h2_ct);
335 if (libhb_event_map_lookup(
336 &wherep, &thrp, &conf_szB, &conf_isW,
337 acc_thr, acc_addr, acc_szB, acc_isW )) {
/* Lookup succeeded: record the conflicting access (h2) details. */
341 threadp = libhb_get_Thr_opaque( thrp );
343 xe->XE.Race.h2_ct_accEC = wherep;
344 xe->XE.Race.h2_ct = threadp;
345 xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
346 xe->XE.Race.h2_ct_accIsW = conf_isW;
350 // both NULL or both non-NULL
351 tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
354 return sizeof(XError);
/* Record a data-race error on [data_addr, +szB).  Cheap fields only;
   the expensive address description is deferred to HG_(update_extra).
   NOTE(review): this extract is missing the `Thread* h1_ct` parameter
   line, the local `XError xe;`, the `init_XError(&xe); xe.tag=XE_Race;`
   lines, and closing braces. */
357 void HG_(record_error_Race) ( Thread* thr,
358 Addr data_addr, Int szB, Bool isWrite,
360 ExeContext* h1_ct_segstart,
361 ExeContext* h1_ct_mbsegendEC )
364 tl_assert( HG_(is_sane_Thread)(thr) );
366 # if defined(VGO_linux)
367 /* Skip any races on locations apparently in GOTPLT sections. This
368 is said to be caused by ld.so poking PLT table entries (or
369 whatever) when it writes the resolved address of a dynamically
370 linked routine, into the table (or whatever) when it is called
371 for the first time. */
373 VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, 0, data_addr );
374 if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
375 data_addr, VG_(pp_SectKind)(sect));
376 /* SectGOTPLT is skipped on linux (ld.so pokes it at lazy resolution) */
377 if (sect == Vg_SectGOTPLT) return;
378 /* SectPLT is required on ppc32/64-linux */
379 if (sect == Vg_SectPLT) return;
385 xe.XE.Race.data_addr = data_addr;
386 xe.XE.Race.szB = szB;
387 xe.XE.Race.isWrite = isWrite;
388 xe.XE.Race.thr = thr;
389 tl_assert(isWrite == False || isWrite == True);
390 tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
391 /* Skip on the detailed description of the raced-on address at this
392 point; it's expensive. Leave it for the update_extra function
393 if we ever make it that far. */
394 tl_assert(xe.XE.Race.descr1 == NULL);
395 tl_assert(xe.XE.Race.descr2 == NULL);
397 // Skip on any of the conflicting-access info at this point.
398 // It's expensive to obtain, and this error is more likely than
399 // not to be discarded. We'll fill these fields in in
400 // HG_(update_extra) just above, assuming the error ever makes
401 // it that far (unlikely).
402 xe.XE.Race.h2_ct_accSzB = 0;
403 xe.XE.Race.h2_ct_accIsW = False;
404 xe.XE.Race.h2_ct_accEC = NULL;
405 xe.XE.Race.h2_ct = NULL;
406 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
407 tl_assert( thr->coretid != VG_INVALID_THREADID );
409 xe.XE.Race.h1_ct = h1_ct;
410 xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
411 xe.XE.Race.h1_ct_mbsegendEC = h1_ct_mbsegendEC;
413 VG_(maybe_record_error)( thr->coretid,
414 XE_Race, data_addr, NULL, &xe );
/* Record: thread 'thr' unlocked lock 'lk' which was not locked.  The
   lock is converted to its persistent LockP form for storage.
   NOTE(review): the local `XError xe;` / `init_XError(&xe);` lines and
   the closing brace are missing from this extract. */
417 void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
420 tl_assert( HG_(is_sane_Thread)(thr) );
421 tl_assert( HG_(is_sane_LockN)(lk) );
423 xe.tag = XE_UnlockUnlocked;
424 xe.XE.UnlockUnlocked.thr = thr;
425 xe.XE.UnlockUnlocked.lock = mk_LockP_from_LockN(lk);
427 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
428 tl_assert( thr->coretid != VG_INVALID_THREADID );
429 VG_(maybe_record_error)( thr->coretid,
430 XE_UnlockUnlocked, 0, NULL, &xe );
/* Record: thread 'thr' unlocked lock 'lk' currently held by 'owner'.
   NOTE(review): `XError xe;` / `init_XError(&xe);` and the closing
   brace are missing from this extract. */
433 void HG_(record_error_UnlockForeign) ( Thread* thr,
434 Thread* owner, Lock* lk )
437 tl_assert( HG_(is_sane_Thread)(thr) );
438 tl_assert( HG_(is_sane_Thread)(owner) );
439 tl_assert( HG_(is_sane_LockN)(lk) );
441 xe.tag = XE_UnlockForeign;
442 xe.XE.UnlockForeign.thr = thr;
443 xe.XE.UnlockForeign.owner = owner;
444 xe.XE.UnlockForeign.lock = mk_LockP_from_LockN(lk);
446 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
447 tl_assert( thr->coretid != VG_INVALID_THREADID );
448 VG_(maybe_record_error)( thr->coretid,
449 XE_UnlockForeign, 0, NULL, &xe );
/* Record: thread 'thr' tried to unlock address 'lock_ga' which is not
   known to be a lock.  NOTE(review): `XError xe;` / `init_XError(&xe);`
   and the closing brace are missing from this extract. */
452 void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
455 tl_assert( HG_(is_sane_Thread)(thr) );
457 xe.tag = XE_UnlockBogus;
458 xe.XE.UnlockBogus.thr = thr;
459 xe.XE.UnlockBogus.lock_ga = lock_ga;
461 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
462 tl_assert( thr->coretid != VG_INVALID_THREADID );
463 VG_(maybe_record_error)( thr->coretid,
464 XE_UnlockBogus, 0, NULL, &xe );
/* Record a lock-order violation: 'before_ga' was historically always
   acquired before 'after_ga', but that order was broken here.  No-op
   unless lock-order tracking is enabled.  NOTE(review): the `)` of the
   parameter list's line, `XError xe;` / `init_XError(&xe);`, the body
   of the clo_track_lockorders early-return, and the closing brace are
   missing from this extract. */
467 void HG_(record_error_LockOrder)(
468 Thread* thr, Addr before_ga, Addr after_ga,
469 ExeContext* before_ec, ExeContext* after_ec
473 tl_assert( HG_(is_sane_Thread)(thr) );
474 if (!HG_(clo_track_lockorders))
477 xe.tag = XE_LockOrder;
478 xe.XE.LockOrder.thr = thr;
479 xe.XE.LockOrder.before_ga = before_ga;
480 xe.XE.LockOrder.before_ec = before_ec;
481 xe.XE.LockOrder.after_ga = after_ga;
482 xe.XE.LockOrder.after_ec = after_ec;
484 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
485 tl_assert( thr->coretid != VG_INVALID_THREADID );
486 VG_(maybe_record_error)( thr->coretid,
487 XE_LockOrder, 0, NULL, &xe );
/* Record a pthreads API failure: call to 'fnname' returned error code
   'err' ('errstr' is its symbolic name).  Both strings are interned so
   the stored error owns persistent copies.  NOTE(review): `XError xe;`
   / `init_XError(&xe);` and the closing brace are missing from this
   extract. */
490 void HG_(record_error_PthAPIerror) ( Thread* thr, HChar* fnname,
491 Word err, HChar* errstr )
494 tl_assert( HG_(is_sane_Thread)(thr) );
498 xe.tag = XE_PthAPIerror;
499 xe.XE.PthAPIerror.thr = thr;
500 xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
501 xe.XE.PthAPIerror.err = err;
502 xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
504 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
505 tl_assert( thr->coretid != VG_INVALID_THREADID );
506 VG_(maybe_record_error)( thr->coretid,
507 XE_PthAPIerror, 0, NULL, &xe );
/* Record a miscellaneous error described by 'errstr' (interned to a
   persistent copy).  NOTE(review): `XError xe;` / `init_XError(&xe);`,
   the `xe.tag = XE_Misc;` line, and the closing brace are missing from
   this extract. */
510 void HG_(record_error_Misc) ( Thread* thr, HChar* errstr )
513 tl_assert( HG_(is_sane_Thread)(thr) );
517 xe.XE.Misc.thr = thr;
518 xe.XE.Misc.errstr = string_table_strdup(errstr);
520 tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
521 tl_assert( thr->coretid != VG_INVALID_THREADID );
522 VG_(maybe_record_error)( thr->coretid,
523 XE_Misc, 0, NULL, &xe );
/* Error-manager callback: decide whether two errors of the same kind
   are duplicates (so only one is reported).  Comparison is field-wise
   per kind; for races, the address only matters when
   --cmp-race-err-addrs is given.  NOTE(review): declarations of
   xe1/xe2, several `case` labels (XE_Race, XE_UnlockBogus,
   XE_PthAPIerror, XE_LockOrder, XE_Misc), the `: True` arm of the
   race ternary, the default arm, and closing braces are missing from
   this extract. */
526 Bool HG_(eq_Error) ( VgRes not_used, Error* e1, Error* e2 )
530 tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
532 xe1 = (XError*)VG_(get_error_extra)(e1);
533 xe2 = (XError*)VG_(get_error_extra)(e2);
537 switch (VG_(get_error_kind)(e1)) {
539 return xe1->XE.Race.szB == xe2->XE.Race.szB
540 && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
541 && (HG_(clo_cmp_race_err_addrs)
542 ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
544 case XE_UnlockUnlocked:
545 return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
546 && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
547 case XE_UnlockForeign:
548 return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
549 && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
550 && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
552 return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
553 && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
555 return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
556 && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
557 xe2->XE.PthAPIerror.fnname)
558 && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
560 return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
562 return xe1->XE.Misc.thr == xe2->XE.Misc.thr
563 && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
573 /*----------------------------------------------------------------*/
574 /*--- Error management -- printing ---*/
575 /*----------------------------------------------------------------*/
577 /* Do a printf-style operation on either the XML or normal output
578 channel, depending on the setting of VG_(clo_xml).
/* Route a formatted message to the XML channel or the normal user
   channel, depending on VG_(clo_xml).  NOTE(review): the opening brace,
   the `if (VG_(clo_xml)) ... else ...` control lines, and the closing
   brace are missing from this extract. */
580 static void emit_WRK ( HChar* format, va_list vargs )
583 VG_(vprintf_xml)(format, vargs);
585 VG_(vmessage)(Vg_UserMsg, format, vargs);
/* printf-style wrapper over emit_WRK, with compile-time format checking
   via PRINTF_CHECK.  NOTE(review): the `va_list vargs;` declaration,
   `va_end(vargs);`, and the closing brace are missing from this
   extract. */
588 static void emit ( HChar* format, ... ) PRINTF_CHECK(1, 2);
589 static void emit ( HChar* format, ... )
592 va_start(vargs, format);
593 emit_WRK(format, vargs);
/* Same as emit() but deliberately without the printf format check —
   used for formats containing Valgrind's non-standard %t conversion.
   NOTE(review): `va_list vargs;`, `va_end(vargs);`, and the closing
   brace are missing from this extract. */
596 static void emit_no_f_c ( HChar* format, ... )
599 va_start(vargs, format);
600 emit_WRK(format, vargs);
605 /* Announce (that is, print the point-of-creation) of 'thr'. Only do
606 this once, as we only want to see these announcements once per
607 thread. Returned Bool indicates whether or not an announcement was
/* Print the point-of-creation of 'thr' once; sets thr->announced so a
   second call is a no-op.  Thread #1 is the root thread and has no
   creation context.  Both XML and plain-text forms are emitted here.
   NOTE(review): the opening brace, the early `if (thr->announced)
   return False;` guard, the `if (VG_(clo_xml)) { ... } else { ... }`
   split, several else/closing braces, the errmsg_index arguments to two
   VG_(message) calls, and the final `return True;` are missing from
   this extract. */
610 static Bool announce_one_thread ( Thread* thr )
612 tl_assert(HG_(is_sane_Thread)(thr));
613 tl_assert(thr->errmsg_index >= 1);
/* -- XML form -- */
619 VG_(printf_xml)("<announcethread>\n");
620 VG_(printf_xml)(" <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
621 if (thr->errmsg_index == 1) {
622 tl_assert(thr->created_at == NULL);
623 VG_(printf_xml)(" <isrootthread></isrootthread>\n");
625 tl_assert(thr->created_at != NULL);
626 VG_(pp_ExeContext)( thr->created_at );
628 VG_(printf_xml)("</announcethread>\n\n");
/* -- plain-text form -- */
632 if (thr->errmsg_index == 1) {
633 tl_assert(thr->created_at == NULL);
634 VG_(message)(Vg_UserMsg,
635 "Thread #%d is the program's root thread\n",
638 tl_assert(thr->created_at != NULL);
639 VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
641 VG_(pp_ExeContext)( thr->created_at );
643 VG_(message)(Vg_UserMsg, "\n");
647 thr->announced = True;
652 /* This is the "this error is due to be printed shortly; so have a
653 look at it any print any preamble you want" function. We use it to
654 announce any previously un-announced threads in the upcoming error
/* Error-manager callback invoked just before an error is printed:
   announce (print creation points of) all threads involved in the
   upcoming error that have not been announced yet.  NOTE(review): the
   opening brace, the `XError* xe;` declaration, the tl_assert on xe,
   most `case XE_*:` labels, `break;` statements, the default arm, and
   closing braces are missing from this extract. */
657 void HG_(before_pp_Error) ( Error* err )
661 xe = (XError*)VG_(get_error_extra)(err);
664 switch (VG_(get_error_kind)(err)) {
666 announce_one_thread( xe->XE.Misc.thr );
669 announce_one_thread( xe->XE.LockOrder.thr );
672 announce_one_thread( xe->XE.PthAPIerror.thr );
675 announce_one_thread( xe->XE.UnlockBogus.thr );
677 case XE_UnlockForeign:
678 announce_one_thread( xe->XE.UnlockForeign.thr );
679 announce_one_thread( xe->XE.UnlockForeign.owner );
681 case XE_UnlockUnlocked:
682 announce_one_thread( xe->XE.UnlockUnlocked.thr );
/* XE_Race: announce the racing thread plus any recorded conflicting
   threads (h1/h2). */
685 announce_one_thread( xe->XE.Race.thr );
686 if (xe->XE.Race.h2_ct)
687 announce_one_thread( xe->XE.Race.h2_ct );
688 if (xe->XE.Race.h1_ct)
689 announce_one_thread( xe->XE.Race.h1_ct );
/* Error-manager callback: pretty-print one Helgrind error, in XML or
   plain text per VG_(clo_xml).  One arm per XE_* kind.  NOTE(review):
   this extract is missing the opening brace, most `case XE_*:` labels
   and the `if (xml) { ... } else { ... }` control lines, `break;`s,
   closing braces, locals of the XE_Race arm (what/szB/err_ga), and the
   default arm.  Also note the duplicated word in the runtime strings
   "(the end of the the thread)" below — a string-literal typo that a
   comment-only edit cannot fix; flagged for follow-up. */
696 void HG_(pp_Error) ( Error* err )
698 const Bool xml = VG_(clo_xml); /* a shorthand, that's all */
700 XError *xe = (XError*)VG_(get_error_extra)(err);
703 switch (VG_(get_error_kind)(err)) {
/* -- XE_Misc -- */
706 tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );
710 emit( " <kind>Misc</kind>\n");
711 emit( " <xwhat>\n" );
712 emit( " <text>Thread #%d: %s</text>\n",
713 (Int)xe->XE.Misc.thr->errmsg_index,
714 xe->XE.Misc.errstr );
715 emit( " <hthreadid>%d</hthreadid>\n",
716 (Int)xe->XE.Misc.thr->errmsg_index );
717 emit( " </xwhat>\n" );
718 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
722 emit( "Thread #%d: %s\n",
723 (Int)xe->XE.Misc.thr->errmsg_index,
724 xe->XE.Misc.errstr );
725 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
/* -- XE_LockOrder -- */
732 tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );
736 emit( " <kind>LockOrder</kind>\n");
737 emit( " <xwhat>\n" );
738 emit( " <text>Thread #%d: lock order \"%p before %p\" "
740 (Int)xe->XE.LockOrder.thr->errmsg_index,
741 (void*)xe->XE.LockOrder.before_ga,
742 (void*)xe->XE.LockOrder.after_ga );
743 emit( " <hthreadid>%d</hthreadid>\n",
744 (Int)xe->XE.LockOrder.thr->errmsg_index );
745 emit( " </xwhat>\n" );
746 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
747 if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
748 emit( " <auxwhat>Required order was established by "
749 "acquisition of lock at %p</auxwhat>\n",
750 (void*)xe->XE.LockOrder.before_ga );
751 VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
752 emit( " <auxwhat>followed by a later acquisition "
753 "of lock at %p</auxwhat>\n",
754 (void*)xe->XE.LockOrder.after_ga );
755 VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
760 emit( "Thread #%d: lock order \"%p before %p\" violated\n",
761 (Int)xe->XE.LockOrder.thr->errmsg_index,
762 (void*)xe->XE.LockOrder.before_ga,
763 (void*)xe->XE.LockOrder.after_ga );
764 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
765 if (xe->XE.LockOrder.before_ec && xe->XE.LockOrder.after_ec) {
766 emit( " Required order was established by "
767 "acquisition of lock at %p\n",
768 (void*)xe->XE.LockOrder.before_ga );
769 VG_(pp_ExeContext)( xe->XE.LockOrder.before_ec );
770 emit( " followed by a later acquisition of lock at %p\n",
771 (void*)xe->XE.LockOrder.after_ga );
772 VG_(pp_ExeContext)( xe->XE.LockOrder.after_ec );
/* -- XE_PthAPIerror: note emit_no_f_c is used because of the
   non-standard %t conversion in the format string -- */
780 case XE_PthAPIerror: {
781 tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );
785 emit( " <kind>PthAPIerror</kind>\n");
786 emit( " <xwhat>\n" );
788 " <text>Thread #%d's call to %t failed</text>\n",
789 (Int)xe->XE.PthAPIerror.thr->errmsg_index,
790 xe->XE.PthAPIerror.fnname );
791 emit( " <hthreadid>%d</hthreadid>\n",
792 (Int)xe->XE.PthAPIerror.thr->errmsg_index );
793 emit( " </xwhat>\n" );
794 emit( " <what>with error code %ld (%s)</what>\n",
795 xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
796 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
800 emit_no_f_c( "Thread #%d's call to %t failed\n",
801 (Int)xe->XE.PthAPIerror.thr->errmsg_index,
802 xe->XE.PthAPIerror.fnname );
803 emit( " with error code %ld (%s)\n",
804 xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
805 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
/* -- XE_UnlockBogus -- */
812 case XE_UnlockBogus: {
813 tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );
817 emit( " <kind>UnlockBogus</kind>\n");
818 emit( " <xwhat>\n" );
819 emit( " <text>Thread #%d unlocked an invalid "
820 "lock at %p</text>\n",
821 (Int)xe->XE.UnlockBogus.thr->errmsg_index,
822 (void*)xe->XE.UnlockBogus.lock_ga );
823 emit( " <hthreadid>%d</hthreadid>\n",
824 (Int)xe->XE.UnlockBogus.thr->errmsg_index );
825 emit( " </xwhat>\n" );
826 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
830 emit( "Thread #%d unlocked an invalid lock at %p\n",
831 (Int)xe->XE.UnlockBogus.thr->errmsg_index,
832 (void*)xe->XE.UnlockBogus.lock_ga );
833 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
/* -- XE_UnlockForeign -- */
840 case XE_UnlockForeign: {
841 tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
842 tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
843 tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );
847 emit( " <kind>UnlockForeign</kind>\n");
848 emit( " <xwhat>\n" );
849 emit( " <text>Thread #%d unlocked lock at %p "
850 "currently held by thread #%d</text>\n",
851 (Int)xe->XE.UnlockForeign.thr->errmsg_index,
852 (void*)xe->XE.UnlockForeign.lock->guestaddr,
853 (Int)xe->XE.UnlockForeign.owner->errmsg_index );
854 emit( " <hthreadid>%d</hthreadid>\n",
855 (Int)xe->XE.UnlockForeign.thr->errmsg_index );
856 emit( " <hthreadid>%d</hthreadid>\n",
857 (Int)xe->XE.UnlockForeign.owner->errmsg_index );
858 emit( " </xwhat>\n" );
859 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
861 if (xe->XE.UnlockForeign.lock->appeared_at) {
862 emit( " <auxwhat>Lock at %p was first observed</auxwhat>\n",
863 (void*)xe->XE.UnlockForeign.lock->guestaddr );
864 VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
869 emit( "Thread #%d unlocked lock at %p "
870 "currently held by thread #%d\n",
871 (Int)xe->XE.UnlockForeign.thr->errmsg_index,
872 (void*)xe->XE.UnlockForeign.lock->guestaddr,
873 (Int)xe->XE.UnlockForeign.owner->errmsg_index );
874 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
875 if (xe->XE.UnlockForeign.lock->appeared_at) {
876 emit( " Lock at %p was first observed\n",
877 (void*)xe->XE.UnlockForeign.lock->guestaddr );
878 VG_(pp_ExeContext)( xe->XE.UnlockForeign.lock->appeared_at );
/* -- XE_UnlockUnlocked -- */
886 case XE_UnlockUnlocked: {
887 tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
888 tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );
892 emit( " <kind>UnlockUnlocked</kind>\n");
893 emit( " <xwhat>\n" );
894 emit( " <text>Thread #%d unlocked a "
895 "not-locked lock at %p</text>\n",
896 (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
897 (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
898 emit( " <hthreadid>%d</hthreadid>\n",
899 (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
900 emit( " </xwhat>\n" );
901 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
902 if (xe->XE.UnlockUnlocked.lock->appeared_at) {
903 emit( " <auxwhat>Lock at %p was first observed</auxwhat>\n",
904 (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
905 VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
910 emit( "Thread #%d unlocked a not-locked lock at %p\n",
911 (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
912 (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
913 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
914 if (xe->XE.UnlockUnlocked.lock->appeared_at) {
915 emit( " Lock at %p was first observed\n",
916 (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
917 VG_(pp_ExeContext)( xe->XE.UnlockUnlocked.lock->appeared_at );
/* -- XE_Race: primary access, then any conflicting h2/h1 accesses,
   then the heap-block or symbolic description of the address -- */
929 what = xe->XE.Race.isWrite ? "write" : "read";
930 szB = xe->XE.Race.szB;
931 err_ga = VG_(get_error_address)(err);
933 tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
934 if (xe->XE.Race.h2_ct)
935 tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));
939 /* ------ XML ------ */
940 emit( " <kind>Race</kind>\n" );
941 emit( " <xwhat>\n" );
942 emit( " <text>Possible data race during %s of size %d "
943 "at %#lx by thread #%d</text>\n",
944 what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index );
945 emit( " <hthreadid>%d</hthreadid>\n",
946 (Int)xe->XE.Race.thr->errmsg_index );
947 emit( " </xwhat>\n" );
948 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
950 if (xe->XE.Race.h2_ct) {
951 tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
952 emit( " <xauxwhat>\n");
953 emit( " <text>This conflicts with a previous %s of size %d "
954 "by thread #%d</text>\n",
955 xe->XE.Race.h2_ct_accIsW ? "write" : "read",
956 xe->XE.Race.h2_ct_accSzB,
957 xe->XE.Race.h2_ct->errmsg_index );
958 emit( " <hthreadid>%d</hthreadid>\n",
959 xe->XE.Race.h2_ct->errmsg_index);
960 emit(" </xauxwhat>\n");
961 VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
964 if (xe->XE.Race.h1_ct) {
965 emit( " <xauxwhat>\n");
966 emit( " <text>This conflicts with a previous access "
967 "by thread #%d, after</text>\n",
968 xe->XE.Race.h1_ct->errmsg_index );
969 emit( " <hthreadid>%d</hthreadid>\n",
970 xe->XE.Race.h1_ct->errmsg_index );
971 emit(" </xauxwhat>\n");
972 if (xe->XE.Race.h1_ct_mbsegstartEC) {
973 VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
975 emit( " <auxwhat>(the start of the thread)</auxwhat>\n" );
977 emit( " <auxwhat>but before</auxwhat>\n" );
978 if (xe->XE.Race.h1_ct_mbsegendEC) {
979 VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
981 emit( " <auxwhat>(the end of the the thread)</auxwhat>\n" );
987 /* ------ Text ------ */
988 emit( "Possible data race during %s of size %d "
989 "at %#lx by thread #%d\n",
990 what, szB, err_ga, (Int)xe->XE.Race.thr->errmsg_index );
991 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
993 if (xe->XE.Race.h2_ct) {
994 tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
995 emit( " This conflicts with a previous %s of size %d "
997 xe->XE.Race.h2_ct_accIsW ? "write" : "read",
998 xe->XE.Race.h2_ct_accSzB,
999 xe->XE.Race.h2_ct->errmsg_index );
1000 VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
1003 if (xe->XE.Race.h1_ct) {
1004 emit( " This conflicts with a previous access by thread #%d, "
1006 xe->XE.Race.h1_ct->errmsg_index );
1007 if (xe->XE.Race.h1_ct_mbsegstartEC) {
1008 VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
1010 emit( " (the start of the thread)\n" );
1012 emit( " but before\n" );
1013 if (xe->XE.Race.h1_ct_mbsegendEC) {
1014 VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
1016 emit( " (the end of the the thread)\n" );
1022 /* If we have a description of the address in terms of a heap
1024 if (xe->XE.Race.hctxt) {
1025 SizeT delta = err_ga - xe->XE.Race.haddr;
1027 emit(" <auxwhat>Address %#lx is %ld bytes inside a block "
1028 "of size %ld alloc'd</auxwhat>\n", err_ga, delta,
1030 VG_(pp_ExeContext)( xe->XE.Race.hctxt );
1032 emit(" Address %#lx is %ld bytes inside a block "
1033 "of size %ld alloc'd\n", err_ga, delta,
1035 VG_(pp_ExeContext)( xe->XE.Race.hctxt );
1039 /* If we have a better description of the address, show it.
1040 Note that in XML mode, it will already by nicely wrapped up
1041 in tags, either <auxwhat> or <xauxwhat>, so we can just emit
1043 if (xe->XE.Race.descr1)
1044 emit( "%s%s\n", xml ? " " : " ",
1045 (HChar*)VG_(indexXA)( xe->XE.Race.descr1, 0 ) );
1046 if (xe->XE.Race.descr2)
1047 emit( "%s%s\n", xml ? " " : " ",
1048 (HChar*)VG_(indexXA)( xe->XE.Race.descr2, 0 ) );
1050 break; /* case XE_Race */
1051 } /* case XE_Race */
1055 } /* switch (VG_(get_error_kind)(err)) */
/* Error-manager callback: map an error kind to its suppression-file
   name.  NOTE(review): the opening brace and the closing braces of the
   switch/function are missing from this extract. */
1058 Char* HG_(get_error_name) ( Error* err )
1060 switch (VG_(get_error_kind)(err)) {
1061 case XE_Race: return "Race";
1062 case XE_UnlockUnlocked: return "UnlockUnlocked";
1063 case XE_UnlockForeign: return "UnlockForeign";
1064 case XE_UnlockBogus: return "UnlockBogus";
1065 case XE_PthAPIerror: return "PthAPIerror";
1066 case XE_LockOrder: return "LockOrder";
1067 case XE_Misc: return "Misc";
1068 default: tl_assert(0); /* fill in missing case */
/* Error-manager callback: if 'name' is a known Helgrind suppression
   kind, set it on 'su' and return True.  NOTE(review): the `return
   True;` inside the TRY macro, the `#undef TRY` / `return False;`
   tail, and closing braces are missing from this extract. */
1072 Bool HG_(recognised_suppression) ( Char* name, Supp *su )
1074 # define TRY(_name,_xskind) \
1075 if (0 == VG_(strcmp)(name, (_name))) { \
1076 VG_(set_supp_kind)(su, (_xskind)); \
1079 TRY("Race", XS_Race);
1080 TRY("FreeMemLock", XS_FreeMemLock);
1081 TRY("UnlockUnlocked", XS_UnlockUnlocked);
1082 TRY("UnlockForeign", XS_UnlockForeign);
1083 TRY("UnlockBogus", XS_UnlockBogus);
1084 TRY("PthAPIerror", XS_PthAPIerror);
1085 TRY("LockOrder", XS_LockOrder);
1086 TRY("Misc", XS_Misc);
/* Error-manager callback: Helgrind suppressions carry no extra lines,
   so there is nothing to parse.  NOTE(review): the trailing parameter
   line, the `return True;`, and the braces are missing from this
   extract. */
1091 Bool HG_(read_extra_suppression_info) ( Int fd, Char** bufpp, SizeT* nBufp,
1094 /* do nothing -- no extra suppression info present. Return True to
1095 indicate nothing bad happened. */
/* Error-manager callback: a suppression matches an error iff their
   kinds correspond one-to-one (XS_* <-> XE_*).  NOTE(review): the
   opening brace and the closing braces of the switch/function are
   missing from this extract. */
1099 Bool HG_(error_matches_suppression) ( Error* err, Supp* su )
1101 switch (VG_(get_supp_kind)(su)) {
1102 case XS_Race: return VG_(get_error_kind)(err) == XE_Race;
1103 case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
1104 case XS_UnlockForeign: return VG_(get_error_kind)(err) == XE_UnlockForeign;
1105 case XS_UnlockBogus: return VG_(get_error_kind)(err) == XE_UnlockBogus;
1106 case XS_PthAPIerror: return VG_(get_error_kind)(err) == XE_PthAPIerror;
1107 case XS_LockOrder: return VG_(get_error_kind)(err) == XE_LockOrder;
1108 case XS_Misc: return VG_(get_error_kind)(err) == XE_Misc;
1109 //case XS_: return VG_(get_error_kind)(err) == XE_;
1110 default: tl_assert(0); /* fill in missing cases */
/* Error-manager callback: Helgrind emits no extra suppression text.
   NOTE(review): the function body (presumably just `return False;`)
   is missing from this extract. */
1114 Bool HG_(get_extra_suppression_info) ( Error* err,
1115 /*OUT*/Char* buf, Int nBuf )
1122 /*--------------------------------------------------------------------*/
1123 /*--- end hg_errors.c ---*/
1124 /*--------------------------------------------------------------------*/