/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"               // happens-before engine: Thr, SO, LIBHB_* macros
// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.
/* ------------ Debug/trace options ------------ */

// When enabled, reports counts for the expensive operations, e.g.:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0

static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0
/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.
/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Never changed; we only care about its address.  Is treated as if it
   was a standard userspace lock.  Also we have a Lock* describing it
   so it can participate in lock sets in the usual way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;
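
// Illustrative sketch (an assumption here, mirroring how the bus lock
// is used by the bus-lock event handlers later in this file): a
// LOCK-prefixed instruction is modelled as a write-acquire/release
// pair on this pseudo-lock, so that atomic accesses serialise against
// each other in the usual lock-based way:
//
//    evhH__post_thread_w_acquires_lock( thr, LK_nonRec,
//                                       (Addr)&__bus_lock );
//    ... the LOCK-prefixed access itself ...
//    evhH__pre_thread_releases_lock( thr, (Addr)&__bus_lock,
//                                    False/*!isRDWR*/ );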
/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   admin_threads        = thread;
   return thread;
}
// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock        = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin       = admin_locks;
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks       = lock;
   return lock;
}
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
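
// Summary of the write-acquisition transitions accepted above (a
// restatement of the switch, not extra checking):
//
//    LK_nonRec: unheld                  -> w-held by thr
//    LK_mbRec:  unheld                  -> w-held by thr
//               w-held by thr (n times) -> w-held by thr (n+1 times)
//    LK_rdwr:   unheld                  -> w-held by thr
//
// Anything else asserts, since this is only called after an
// acquisition which libpthread reported as successful.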
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)
static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n",   (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n",   (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin  %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic  0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}
/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
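
// Example (sketch): the first pthread_mutex_lock on a mutex at guest
// address 'ga' arrives here with no map_locks entry, so an unheld
// LK_nonRec Lock is created, its appeared_at context is recorded, and
// the (ga, Lock*) pair is bound into map_locks; every later event on
// the same 'ga' gets back that same Lock*.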
static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock
         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t
      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True
      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {
         seg->thr is a sane Thread
      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead
      }
*/
/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}
/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a good
//   chance that it is indeed the lock that the programmer intended to
//   use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is in the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
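//
// Illustrative scenario (a sketch of the above): threads T1 and T2
// both access location 'x' only while holding lock L, so x's lockset
// stays {L}.  T1 then reads x without L: the lockset transition
// {L} -> {} records L as the lossage lock for x.  If a later write
// makes x ShM with an empty lockset, the error report can offer L as
// the lock the programmer probably intended to hold.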
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}
/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)   /* debug printing, normally disabled */
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)   /* debug printing, normally disabled */
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/
/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP   = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
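
// Illustrative client fragment (a sketch, standard pthreads): the
// recursive-relock error path above corresponds to e.g.
//
//    pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER; // non-recursive
//    pthread_mutex_lock(&mx);
//    pthread_mutex_lock(&mx);  // if libpthread grants this, it is
//                              // reported here as a libpthread bug
//
// whereas the same sequence on a PTHREAD_MUTEX_RECURSIVE mutex maps
// to LK_mbRec and is accepted by the recursive case above.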
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
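
// Illustrative client fragments (sketches) for the error paths above,
// assuming standard pthreads:
//
//    pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
//    pthread_mutex_unlock(&mx);      // never locked: UnlockUnlocked
//
//    // thread A:                    // thread B:
//    pthread_mutex_lock(&mx);
//                                    pthread_mutex_unlock(&mx);
//                                    // B is not a holder, so the
//                                    // UnlockForeign path fires
//
// and unlocking an address Helgrind has never seen used as a lock
// takes the UnlockBogus path.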
/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/
static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (thr)
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // whereas it doesn't ignore this
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread* thr_s;
   Thread* thr_q;
   Thr*    hbthr_s;
   Thr*    hbthr_q;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
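
// Example (sketch) of the dependence edge created above:
//
//    // parent:                      // child ('worker'):
//    pthread_create(&t, 0, worker, 0);
//                                    *shared = 42;  // pre-exit write
//    pthread_join(t, 0);
//    use(*shared);  // no race: the so_send/so_recv pair above orders
//                   // the child's writes before this point
//
// Accesses made by the quitter before exiting therefore happen-before
// everything the stayer does after the join.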
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // FIXME: think of a less ugly hack
   len = VG_(strlen)( (Char*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}
/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}
1883 /* ------------------------------------------------------- */
1884 /* -------------- events to do with mutexes -------------- */
1885 /* ------------------------------------------------------- */
1887 /* EXPOSITION only: by intercepting lock init events we can show the
1888 user where the lock was initialised, rather than only being able to
1889 show where it was first locked. Intercepting lock initialisations
1890 is not necessary for the basic operation of the race checker. */
1892 void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1893 void* mutex, Word mbRec )
1895 if (SHOW_EVENTS >= 1)
1896 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1897 (Int)tid, mbRec, (void*)mutex );
1898 tl_assert(mbRec == 0 || mbRec == 1);
1899 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1900 (Addr)mutex, tid );
1901 if (HG_(clo_sanity_flags) & SCE_LOCKS)
1902 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1906 void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1910 if (SHOW_EVENTS >= 1)
1911 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1912 (Int)tid, (void*)mutex );
1914 thr = map_threads_maybe_lookup( tid );
1915 /* cannot fail - Thread* must already exist */
1916 tl_assert( HG_(is_sane_Thread)(thr) );
1918 lk = map_locks_maybe_lookup( (Addr)mutex );
1920 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
1921 HG_(record_error_Misc)(
1922 thr, "pthread_mutex_destroy with invalid argument" );
1926 tl_assert( HG_(is_sane_LockN)(lk) );
1927 tl_assert( lk->guestaddr == (Addr)mutex );
1929 /* Basically act like we unlocked the lock */
1930 HG_(record_error_Misc)(
1931 thr, "pthread_mutex_destroy of a locked mutex" );
1932 /* remove lock from locksets of all owning threads */
1933 remove_Lock_from_locksets_of_all_owning_Threads( lk );
1934 VG_(deleteBag)( lk->heldBy );
1937 lk->acquired_at = NULL;
1939 tl_assert( !lk->heldBy );
1940 tl_assert( HG_(is_sane_LockN)(lk) );
1942 laog__handle_one_lock_deletion(lk);
1943 map_locks_delete( lk->guestaddr );
1947 if (HG_(clo_sanity_flags) & SCE_LOCKS)
1948 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1951 static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1952 void* mutex, Word isTryLock )
1954 /* Just check the mutex is sane; nothing else to do. */
1955 // 'mutex' may be invalid - not checked by wrapper
1958 if (SHOW_EVENTS >= 1)
1959 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1960 (Int)tid, (void*)mutex );
1962 tl_assert(isTryLock == 0 || isTryLock == 1);
1963 thr = map_threads_maybe_lookup( tid );
1964 tl_assert(thr); /* cannot fail - Thread* must already exist */
1966 lk = map_locks_maybe_lookup( (Addr)mutex );
1968 if (lk && (lk->kind == LK_rdwr)) {
1969 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1970 "pthread_rwlock_t* argument " );
1975 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1978 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
1979 /* uh, it's a non-recursive lock and we already w-hold it, and
1980 this is a real lock operation (not a speculative "tryLock"
1981 kind of thing). Duh. Deadlock coming up; but at least
1982 produce an error message. */
1983 HG_(record_error_Misc)( thr, "Attempt to re-lock a "
1984 "non-recursive lock I already hold" );
1988 static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1990 // only called if the real library call succeeded - so mutex is sane
1992 if (SHOW_EVENTS >= 1)
1993 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1994 (Int)tid, (void*)mutex );
1996 thr = map_threads_maybe_lookup( tid );
1997 tl_assert(thr); /* cannot fail - Thread* must already exist */
1999 evhH__post_thread_w_acquires_lock(
2000 thr,
2001 LK_mbRec, /* if not known, create new lock with this LockKind */
2002 (Addr)mutex
2003 );
2004 }
2006 static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2008 // 'mutex' may be invalid - not checked by wrapper
2010 if (SHOW_EVENTS >= 1)
2011 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2012 (Int)tid, (void*)mutex );
2014 thr = map_threads_maybe_lookup( tid );
2015 tl_assert(thr); /* cannot fail - Thread* must already exist */
2017 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2020 static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2022 // only called if the real library call succeeded - so mutex is sane
2024 if (SHOW_EVENTS >= 1)
2025 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2026 (Int)tid, (void*)mutex );
2027 thr = map_threads_maybe_lookup( tid );
2028 tl_assert(thr); /* cannot fail - Thread* must already exist */
2030 // anything we should do here?
2034 /* ------------------------------------------------------- */
2035 /* -------------- events to do with spinlocks ------------ */
2036 /* ------------------------------------------------------- */
2038 /* All a bit of a kludge. Pretend we're really dealing with ordinary
2039 pthread_mutex_t's instead, for the most part. */
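/* (Background sketch -- an assumption about glibc rather than anything
   guaranteed by this file: pthread_spin_init and pthread_spin_unlock
   both just store the "free" value into the lock word, roughly

      void spin_init_or_unlock(volatile int* slock) { *slock = 1; }

   so the two are indistinguishable at intercept time, hence the
   single INIT_OR_UNLOCK event handled below.) */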
2041 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2046 /* In glibc's kludgey world, we're either initialising or unlocking
2047 it. Since this is the pre-routine, if it is locked, unlock it
2048 and take a dependence edge. Otherwise, do nothing. */
2050 if (SHOW_EVENTS >= 1)
2051 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2052 "(ctid=%d, slock=%p)\n",
2053 (Int)tid, (void*)slock );
2055 thr = map_threads_maybe_lookup( tid );
2056 /* cannot fail - Thread* must already exist */
2057 tl_assert( HG_(is_sane_Thread)(thr) );
2059 lk = map_locks_maybe_lookup( (Addr)slock );
2060 if (lk && lk->heldBy) {
2061 /* it's held. So do the normal pre-unlock actions, as copied
2062 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2063 duplicates the map_locks_maybe_lookup. */
2064 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2065 False/*!isRDWR*/ );
2066 }
2069 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2073 /* More kludgery. If the lock has never been seen before, do
2074 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do nothing. */
2077 if (SHOW_EVENTS >= 1)
2078 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2079 "(ctid=%d, slock=%p)\n",
2080 (Int)tid, (void*)slock );
2082 lk = map_locks_maybe_lookup( (Addr)slock );
2083 if (!lk) {
2084 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2085 }
2088 static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2089 void* slock, Word isTryLock )
2091 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2094 static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2097 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2100 static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2103 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2107 /* ----------------------------------------------------- */
2108 /* --------------- events to do with CVs --------------- */
2109 /* ----------------------------------------------------- */
2111 /* A mapping from CV to (the SO associated with it, plus some
2112 auxiliary data for error checking). When the CV is
2113 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2114 wait on it completes, we do a 'recv' from the SO. This is believed
2115 to give the correct happens-before events arising from CV
2116 signallings/broadcasts. */
2119 /* .so is the SO for this CV.
2120 .mx_ga is the associated mutex, when .nWaiters > 0
2122 POSIX says effectively that the first pthread_cond_{timed}wait call
2123 causes a dynamic binding between the CV and the mutex, and that
2124 lasts until such time as the waiter count falls to zero. Hence
2125 need to keep track of the number of waiters in order to do
2126 consistency tracking. */
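/* The binding lifecycle, as a (hypothetical) client sequence:

      // thread T1
      pthread_mutex_lock(&mu);
      pthread_cond_wait(&cv, &mu);    // binds cv to mu; nWaiters 0 -> 1

      // thread T2, while T1 is still waiting
      pthread_mutex_lock(&mu2);
      pthread_cond_wait(&cv, &mu2);   // flagged: cv is currently bound
                                      // to a different mutex (mu)

   Once nWaiters drops back to zero the binding lapses, and a later
   wait may legitimately associate cv with a different mutex. */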
2129 SO* so; /* libhb-allocated SO */
2130 void* mx_ga; /* addr of associated mutex, if any */
2131 UWord nWaiters; /* # threads waiting on the CV */
2136 /* pthread_cond_t* -> CVInfo* */
2137 static WordFM* map_cond_to_CVInfo = NULL;
2139 static void map_cond_to_CVInfo_INIT ( void ) {
2140 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2141 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2142 "hg.mctCI.1", HG_(free), NULL );
2143 tl_assert(map_cond_to_CVInfo != NULL);
2147 static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
2149 map_cond_to_CVInfo_INIT();
2150 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2151 tl_assert(key == (UWord)cond);
2152 return (CVInfo*)val;
2154 SO* so = libhb_so_alloc();
2155 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2156 cvi->so = so;
2157 cvi->mx_ga = 0;
2158 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2163 static void map_cond_to_CVInfo_delete ( void* cond ) {
2165 map_cond_to_CVInfo_INIT();
2166 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2167 CVInfo* cvi = (CVInfo*)valW;
2168 tl_assert(keyW == (UWord)cond);
2171 libhb_so_dealloc(cvi->so);
2177 static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2179 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2180 cond to a SO if it is not already so bound, and 'send' on the
2181 SO. This is later used by other thread(s) which successfully
2182 exit from a pthread_cond_wait on the same cv; then they 'recv'
2183 from the SO, thereby acquiring a dependency on this signalling event. */
2189 if (SHOW_EVENTS >= 1)
2190 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2191 (Int)tid, (void*)cond );
2193 thr = map_threads_maybe_lookup( tid );
2194 tl_assert(thr); /* cannot fail - Thread* must already exist */
2196 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2200 // error-if: mutex is bogus
2201 // error-if: mutex is not locked
2202 // Hmm. POSIX doesn't actually say that it's an error to call
2203 // pthread_cond_signal with the associated mutex being unlocked.
2204 // Although it does say that it should be "if consistent scheduling
2207 // For the moment, disable these checks.
2208 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2209 //if (lk == NULL || cvi->mx_ga == 0) {
2210 // HG_(record_error_Misc)( thr,
2211 // "pthread_cond_{signal,broadcast}: "
2212 // "no or invalid mutex associated with cond");
2214 ///* note: lk could be NULL. Be careful. */
2216 // if (lk->kind == LK_rdwr) {
2217 // HG_(record_error_Misc)(thr,
2218 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2220 // if (lk->heldBy == NULL) {
2221 // HG_(record_error_Misc)(thr,
2222 // "pthread_cond_{signal,broadcast}: "
2223 // "associated lock is not held by any thread");
2225 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2226 // HG_(record_error_Misc)(thr,
2227 // "pthread_cond_{signal,broadcast}: "
2228 // "associated lock is not held by calling thread");
2232 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2235 /* returns True if it reckons 'mutex' is valid and held by this
2236 thread, else False */
2237 static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2238 void* cond, void* mutex )
2242 Bool lk_valid = True;
2245 if (SHOW_EVENTS >= 1)
2246 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2247 "(ctid=%d, cond=%p, mutex=%p)\n",
2248 (Int)tid, (void*)cond, (void*)mutex );
2250 thr = map_threads_maybe_lookup( tid );
2251 tl_assert(thr); /* cannot fail - Thread* must already exist */
2253 lk = map_locks_maybe_lookup( (Addr)mutex );
2255 /* Check for stupid mutex arguments. There are various ways to be
2256 a bozo. Only complain once, though, even if more than one thing is wrong. */
2260 HG_(record_error_Misc)(
2262 "pthread_cond_{timed}wait called with invalid mutex" );
2264 tl_assert( HG_(is_sane_LockN)(lk) );
2265 if (lk->kind == LK_rdwr) {
2267 HG_(record_error_Misc)(
2268 thr, "pthread_cond_{timed}wait called with mutex "
2269 "of type pthread_rwlock_t*" );
2271 if (lk->heldBy == NULL) {
2273 HG_(record_error_Misc)(
2274 thr, "pthread_cond_{timed}wait called with un-held mutex");
2276 if (lk->heldBy != NULL
2277 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
2279 HG_(record_error_Misc)(
2280 thr, "pthread_cond_{timed}wait called with mutex "
2281 "held by a different thread" );
2285 // error-if: cond is also associated with a different mutex
2286 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2289 if (cvi->nWaiters == 0) {
2290 /* form initial (CV,MX) binding */
2291 cvi->mx_ga = mutex;
2292 }
2293 else /* check existing (CV,MX) binding */
2294 if (cvi->mx_ga != mutex) {
2295 HG_(record_error_Misc)(
2296 thr, "pthread_cond_{timed}wait: cond is associated "
2297 "with a different mutex");
2304 static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2305 void* cond, void* mutex )
2307 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2308 the SO for this cond, and 'recv' from it so as to acquire a
2309 dependency edge back to the signaller/broadcaster. */
2313 if (SHOW_EVENTS >= 1)
2314 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2315 "(ctid=%d, cond=%p, mutex=%p)\n",
2316 (Int)tid, (void*)cond, (void*)mutex );
2318 thr = map_threads_maybe_lookup( tid );
2319 tl_assert(thr); /* cannot fail - Thread* must already exist */
2321 // error-if: cond is also associated with a different mutex
2323 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2326 tl_assert(cvi->nWaiters > 0);
2328 if (!libhb_so_everSent(cvi->so)) {
2329 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2330 it? If this happened it would surely be a bug in the threads
2331 library. Or one of those fabled "spurious wakeups". */
2332 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2334 " without prior pthread_cond_post");
2337 /* anyway, acquire a dependency on it. */
2338 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2343 static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2346 /* Deal with destroy events. The only purpose is to free storage
2347 associated with the CV, so as to avoid any possible resource leaks. */
2349 if (SHOW_EVENTS >= 1)
2350 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2351 "(ctid=%d, cond=%p)\n",
2352 (Int)tid, (void*)cond );
2354 map_cond_to_CVInfo_delete( cond );
2358 /* ------------------------------------------------------- */
2359 /* -------------- events to do with rwlocks -------------- */
2360 /* ------------------------------------------------------- */
2362 /* EXPOSITION only */
2364 void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2366 if (SHOW_EVENTS >= 1)
2367 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2368 (Int)tid, (void*)rwl );
2369 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
2370 if (HG_(clo_sanity_flags) & SCE_LOCKS)
2371 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2375 void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2379 if (SHOW_EVENTS >= 1)
2380 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2381 (Int)tid, (void*)rwl );
2383 thr = map_threads_maybe_lookup( tid );
2384 /* cannot fail - Thread* must already exist */
2385 tl_assert( HG_(is_sane_Thread)(thr) );
2387 lk = map_locks_maybe_lookup( (Addr)rwl );
2389 if (lk == NULL || lk->kind != LK_rdwr) {
2390 HG_(record_error_Misc)(
2391 thr, "pthread_rwlock_destroy with invalid argument" );
2395 tl_assert( HG_(is_sane_LockN)(lk) );
2396 tl_assert( lk->guestaddr == (Addr)rwl );
2398 /* Basically act like we unlocked the lock */
2399 HG_(record_error_Misc)(
2400 thr, "pthread_rwlock_destroy of a locked mutex" );
2401 /* remove lock from locksets of all owning threads */
2402 remove_Lock_from_locksets_of_all_owning_Threads( lk );
2403 VG_(deleteBag)( lk->heldBy );
2406 lk->acquired_at = NULL;
2408 tl_assert( !lk->heldBy );
2409 tl_assert( HG_(is_sane_LockN)(lk) );
2411 laog__handle_one_lock_deletion(lk);
2412 map_locks_delete( lk->guestaddr );
2416 if (HG_(clo_sanity_flags) & SCE_LOCKS)
2417 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2421 void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2422 void* rwl,
2423 Word isW, Word isTryLock )
2425 /* Just check the rwl is sane; nothing else to do. */
2426 // 'rwl' may be invalid - not checked by wrapper
2429 if (SHOW_EVENTS >= 1)
2430 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2431 (Int)tid, (Int)isW, (void*)rwl );
2433 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2434 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
2435 thr = map_threads_maybe_lookup( tid );
2436 tl_assert(thr); /* cannot fail - Thread* must already exist */
2438 lk = map_locks_maybe_lookup( (Addr)rwl );
2439 if ( lk
2440 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2441 /* Wrong kind of lock. Duh. */
2442 HG_(record_error_Misc)(
2443 thr, "pthread_rwlock_{rd,rw}lock with a "
2444 "pthread_mutex_t* argument " );
2449 void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2451 // only called if the real library call succeeded - so rwlock is sane
2453 if (SHOW_EVENTS >= 1)
2454 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2455 (Int)tid, (Int)isW, (void*)rwl );
2457 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2458 thr = map_threads_maybe_lookup( tid );
2459 tl_assert(thr); /* cannot fail - Thread* must already exist */
2461 (isW ? evhH__post_thread_w_acquires_lock
2462 : evhH__post_thread_r_acquires_lock)(
2463 thr,
2464 LK_rdwr, /* if not known, create new lock with this LockKind */
2465 (Addr)rwl
2466 );
2467 }
2469 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2471 // 'rwl' may be invalid - not checked by wrapper
2473 if (SHOW_EVENTS >= 1)
2474 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2475 (Int)tid, (void*)rwl );
2477 thr = map_threads_maybe_lookup( tid );
2478 tl_assert(thr); /* cannot fail - Thread* must already exist */
2480 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2483 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2485 // only called if the real library call succeeded - so rwlock is sane
2487 if (SHOW_EVENTS >= 1)
2488 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2489 (Int)tid, (void*)rwl );
2490 thr = map_threads_maybe_lookup( tid );
2491 tl_assert(thr); /* cannot fail - Thread* must already exist */
2493 // anything we should do here?
2497 /* ---------------------------------------------------------- */
2498 /* -------------- events to do with semaphores -------------- */
2499 /* ---------------------------------------------------------- */
2501 /* This is similar to but not identical to the handling for condition variables. */
2504 /* For each semaphore, we maintain a stack of SOs. When a 'post'
2505 operation is done on a semaphore (unlocking, essentially), a new SO
2506 is created for the posting thread, the posting thread does a strong
2507 send to it (which merely installs the posting thread's VC in the
2508 SO), and the SO is pushed on the semaphore's stack.
2510 Later, when a (probably different) thread completes 'wait' on the
2511 semaphore, we pop a SO off the semaphore's stack (which should be
2512 nonempty), and do a strong recv from it. This mechanism creates
2513 dependencies between posters and waiters of the semaphore.
2515 It may not be necessary to use a stack - perhaps a bag of SOs would
2516 do. But we do need to keep track of how many not-yet-consumed posts have
2517 happened for the semaphore.
2519 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
2520 twice on S. T3 cannot complete its waits without both T1 and T2
2521 posting. The above mechanism will ensure that T3 acquires
2522 dependencies on both T1 and T2.
2524 When a semaphore is initialised with value N, we act as if we'd
2525 posted N times on the semaphore: basically create N SOs and do a
2526 strong send to all of them. This allows up to N waits on the
2527 semaphore to acquire a dependency on the initialisation point,
2528 which AFAICS is the correct behaviour.
2530 We don't emit an error for DESTROY_PRE on a semaphore we don't know about. */
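/* The T1/T2/T3 scenario above, as a (hypothetical) client fragment
   (error checking omitted):

      sem_t s;
      sem_init(&s, 0, 0);
      // T1:  produce A;  sem_post(&s);   -- push SO1, strong send
      // T2:  produce B;  sem_post(&s);   -- push SO2, strong send
      // T3:  sem_wait(&s);               -- pop an SO, strong recv
      //      sem_wait(&s);               -- pop the other SO, strong recv
      //      consume A and B;

   After both waits T3's vector clock dominates both post points, so
   T3's accesses to A and B are ordered after the writes that produced
   them, and neither is reported as a race. */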
2534 /* sem_t* -> XArray* SO* */
2535 static WordFM* map_sem_to_SO_stack = NULL;
2537 static void map_sem_to_SO_stack_INIT ( void ) {
2538 if (map_sem_to_SO_stack == NULL) {
2539 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2541 tl_assert(map_sem_to_SO_stack != NULL);
2545 static void push_SO_for_sem ( void* sem, SO* so ) {
2549 map_sem_to_SO_stack_INIT();
2550 if (VG_(lookupFM)( map_sem_to_SO_stack,
2551 &keyW, (UWord*)&xa, (UWord)sem )) {
2552 tl_assert(keyW == (UWord)sem);
2554 VG_(addToXA)( xa, &so );
2556 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2557 VG_(addToXA)( xa, &so );
2558 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
2562 static SO* mb_pop_SO_for_sem ( void* sem ) {
2566 map_sem_to_SO_stack_INIT();
2567 if (VG_(lookupFM)( map_sem_to_SO_stack,
2568 &keyW, (UWord*)&xa, (UWord)sem )) {
2569 /* xa is the stack for this semaphore. */
2571 tl_assert(keyW == (UWord)sem);
2572 sz = VG_(sizeXA)( xa );
2574 if (sz == 0)
2575 return NULL; /* odd, the stack is empty */
2576 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2578 VG_(dropTailXA)( xa, 1 );
2581 /* hmm, that's odd. No stack for this semaphore. */
2586 static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
2591 if (SHOW_EVENTS >= 1)
2592 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
2593 (Int)tid, (void*)sem );
2595 map_sem_to_SO_stack_INIT();
2597 /* Empty out the semaphore's SO stack. This way of doing it is
2598 stupid, but at least it's easy. */
2599 while (1) {
2600 so = mb_pop_SO_for_sem( sem );
2601 if (!so) break;
2602 libhb_so_dealloc(so);
2603 }
2605 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2606 XArray* xa = (XArray*)valW;
2607 tl_assert(keyW == (UWord)sem);
2609 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2615 void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2620 if (SHOW_EVENTS >= 1)
2621 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2622 (Int)tid, (void*)sem, value );
2624 thr = map_threads_maybe_lookup( tid );
2625 tl_assert(thr); /* cannot fail - Thread* must already exist */
2627 /* Empty out the semaphore's SO stack. This way of doing it is
2628 stupid, but at least it's easy. */
2629 while (1) {
2630 so = mb_pop_SO_for_sem( sem );
2631 if (!so) break;
2632 libhb_so_dealloc(so);
2633 }
2635 /* If we don't do this check, the following loop runs us out
2636 of memory for stupid initial values of 'value'. */
2637 if (value > 10000) {
2638 HG_(record_error_Misc)(
2639 thr, "sem_init: initial value exceeds 10000; using 10000" );
2643 /* Now create 'value' new SOs for the thread, do a strong send to
2644 each of them, and push them all on the stack. */
2645 for (; value > 0; value--) {
2646 Thr* hbthr = thr->hbthr;
2649 so = libhb_so_alloc();
2650 libhb_so_send( hbthr, so, True/*strong send*/ );
2651 push_SO_for_sem( sem, so );
2655 static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
2657 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2658 it (iow, write our VC into it, then tick ours), and push the SO
2659 on a stack of SOs associated with 'sem'. This is later used
2660 by other thread(s) which successfully exit from a sem_wait on
2661 the same sem; by doing a strong recv from SOs popped off the
2662 stack, they acquire dependencies on the posting thread. */
2669 if (SHOW_EVENTS >= 1)
2670 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
2671 (Int)tid, (void*)sem );
2673 thr = map_threads_maybe_lookup( tid );
2674 tl_assert(thr); /* cannot fail - Thread* must already exist */
2676 // error-if: sem is bogus
2681 so = libhb_so_alloc();
2682 libhb_so_send( hbthr, so, True/*strong send*/ );
2683 push_SO_for_sem( sem, so );
2686 static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
2688 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2689 the 'sem' from this semaphore's SO-stack, and do a strong recv
2690 from it. This creates a dependency back to one of the post-ers
2691 for the semaphore. */
2697 if (SHOW_EVENTS >= 1)
2698 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
2699 (Int)tid, (void*)sem );
2701 thr = map_threads_maybe_lookup( tid );
2702 tl_assert(thr); /* cannot fail - Thread* must already exist */
2704 // error-if: sem is bogus
2706 so = mb_pop_SO_for_sem( sem );
2712 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2713 libhb_so_dealloc(so);
2714 } else {
2715 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2716 If this happened it would surely be a bug in the threads library. */
2718 HG_(record_error_Misc)(
2719 thr, "Bug in libpthread: sem_wait succeeded on"
2720 " semaphore without prior sem_post");
2725 /* -------------------------------------------------------- */
2726 /* -------------- events to do with barriers -------------- */
2727 /* -------------------------------------------------------- */
2731 Bool initted; /* has it yet been initted by guest? */
2732 Bool resizable; /* is resizing allowed? */
2733 UWord size; /* declared size */
2734 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2738 static Bar* new_Bar ( void ) {
2739 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2741 /* all fields are zero */
2742 tl_assert(bar->initted == False);
2746 static void delete_Bar ( Bar* bar ) {
2749 VG_(deleteXA)(bar->waiting);
2753 /* A mapping which stores auxiliary data for barriers. */
2755 /* pthread_barrier_t* -> Bar* */
2756 static WordFM* map_barrier_to_Bar = NULL;
2758 static void map_barrier_to_Bar_INIT ( void ) {
2759 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2760 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2761 "hg.mbtBI.1", HG_(free), NULL );
2762 tl_assert(map_barrier_to_Bar != NULL);
2766 static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2768 map_barrier_to_Bar_INIT();
2769 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2770 tl_assert(key == (UWord)barrier);
2773 Bar* bar = new_Bar();
2774 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2779 static void map_barrier_to_Bar_delete ( void* barrier ) {
2781 map_barrier_to_Bar_INIT();
2782 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2783 Bar* bar = (Bar*)valW;
2784 tl_assert(keyW == (UWord)barrier);
2790 static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2798 if (SHOW_EVENTS >= 1)
2799 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
2800 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2801 (Int)tid, (void*)barrier, count, resizable );
2803 thr = map_threads_maybe_lookup( tid );
2804 tl_assert(thr); /* cannot fail - Thread* must already exist */
2807 HG_(record_error_Misc)(
2808 thr, "pthread_barrier_init: 'count' argument is zero"
2812 if (resizable != 0 && resizable != 1) {
2813 HG_(record_error_Misc)(
2814 thr, "pthread_barrier_init: invalid 'resizable' argument"
2818 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2822 HG_(record_error_Misc)(
2823 thr, "pthread_barrier_init: barrier is already initialised"
2827 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2828 tl_assert(bar->initted);
2829 HG_(record_error_Misc)(
2830 thr, "pthread_barrier_init: threads are waiting at barrier"
2832 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2834 if (!bar->waiting) {
2835 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2839 tl_assert(bar->waiting);
2840 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
2841 bar->initted = True;
2842 bar->resizable = resizable == 1 ? True : False;
2847 static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2853 /* Deal with destroy events. The only purpose is to free storage
2854 associated with the barrier, so as to avoid any possible resource leaks. */
2856 if (SHOW_EVENTS >= 1)
2857 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2858 "(tid=%d, barrier=%p)\n",
2859 (Int)tid, (void*)barrier );
2861 thr = map_threads_maybe_lookup( tid );
2862 tl_assert(thr); /* cannot fail - Thread* must already exist */
2864 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2867 if (!bar->initted) {
2868 HG_(record_error_Misc)(
2869 thr, "pthread_barrier_destroy: barrier was never initialised"
2873 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2874 HG_(record_error_Misc)(
2875 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2879 /* Maybe we shouldn't do this; just let it persist, so that when it
2880 is reinitialised we don't need to do any dynamic memory
2881 allocation? The downside is a potentially unlimited space leak,
2882 if the client creates (in turn) a large number of barriers all
2883 at different locations. Note that if we do later move to the
2884 don't-delete-it scheme, we need to mark the barrier as
2885 uninitialised again since otherwise a later _init call will
2886 elicit a duplicate-init error. */
2887 map_barrier_to_Bar_delete( barrier );
2891 /* All the threads have arrived. Now do the Interesting Bit. Get a
2892 new synchronisation object and do a weak send to it from all the
2893 participating threads. This makes its vector clocks be the join of
2894 all the individual threads' vector clocks. Then do a strong
2895 receive from it back to all threads, so that their VCs are a copy
2896 of it (hence are all equal to the join of their original VCs.) */
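/* Worked example, two threads and two-entry vector clocks (ignoring
   per-event ticks): if T1 arrives at the barrier with VC [5,0] and T2
   with [0,7], the two weak sends leave the SO holding the join [5,7];
   the strong receives then make both threads' VCs [5,7], so anything
   either thread did before the barrier happens-before anything either
   of them does after it. */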
2897 static void do_barrier_cross_sync_and_empty ( Bar* bar )
2899 /* XXX check bar->waiting has no duplicates */
2901 SO* so = libhb_so_alloc();
2903 tl_assert(bar->waiting);
2904 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2906 /* compute the join ... */
2907 for (i = 0; i < bar->size; i++) {
2908 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2909 Thr* hbthr = t->hbthr;
2910 libhb_so_send( hbthr, so, False/*weak send*/ );
2912 /* ... and distribute to all threads */
2913 for (i = 0; i < bar->size; i++) {
2914 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2915 Thr* hbthr = t->hbthr;
2916 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2919 /* finally, we must empty out the waiting vector */
2920 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2922 /* and we don't need this any more. Perhaps a stack-allocated
2923 SO would be better? */
2924 libhb_so_dealloc(so);
2928 static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2931 /* This function gets called after a client thread calls
2932 pthread_barrier_wait but before it arrives at the real
2933 pthread_barrier_wait.
2935 Why is the following correct? It's a bit subtle.
2937 If this is not the last thread arriving at the barrier, we simply
2938 note its presence and return. Because valgrind (at least as of
2939 Nov 08) is single threaded, we are guaranteed safe from any race
2940 conditions when in this function -- no other client threads are
2943 If this is the last thread, then we are again the only running
2944 thread. All the other threads will have either arrived at the
2945 real pthread_barrier_wait or are on their way to it, but in any
2946 case are guaranteed not to be able to move past it, because this
2947 thread is currently in this function and so has not yet arrived
2948 at the real pthread_barrier_wait. That means that:
2950 1. While we are in this function, none of the other threads
2951 waiting at the barrier can move past it.
2953 2. When this function returns (and simulated execution resumes),
2954 this thread and all other waiting threads will be able to move
2955 past the real barrier.
2957 Because of this, it is now safe to update the vector clocks of
2958 all threads, to represent the fact that they all arrived at the
2959 barrier and have all moved on. There is no danger of any
2960 complications to do with some threads leaving the barrier and
2961 racing back round to the front, whilst others are still leaving
2962 (which is the primary source of complication in correct handling/
2963 implementation of barriers). That can't happen because we update
2964 here our data structures so as to indicate that the threads have
2965 passed the barrier, even though, as per (2) above, they are
2966 guaranteed not to pass the barrier until we return.
2968 This relies crucially on Valgrind being single threaded. If that
2969 changes, this will need to be reconsidered. */
2975 if (SHOW_EVENTS >= 1)
2976 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2977 "(tid=%d, barrier=%p)\n",
2978 (Int)tid, (void*)barrier );
2980 thr = map_threads_maybe_lookup( tid );
2981 tl_assert(thr); /* cannot fail - Thread* must already exist */
2983 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2986 if (!bar->initted) {
2987 HG_(record_error_Misc)(
2988 thr, "pthread_barrier_wait: barrier is uninitialised"
2990 return; /* client is broken .. avoid assertions below */
2993 /* guaranteed by _INIT_PRE above */
2994 tl_assert(bar->size > 0);
2995 tl_assert(bar->waiting);
2997 VG_(addToXA)( bar->waiting, &thr );
2999 /* guaranteed by this function */
3000 present = VG_(sizeXA)(bar->waiting);
3001 tl_assert(present > 0 && present <= bar->size);
3003 if (present < bar->size)
3006 do_barrier_cross_sync_and_empty(bar);
3010 static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3018 if (SHOW_EVENTS >= 1)
3019 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3020 "(tid=%d, barrier=%p, newcount=%lu)\n",
3021 (Int)tid, (void*)barrier, newcount );
3023 thr = map_threads_maybe_lookup( tid );
3024 tl_assert(thr); /* cannot fail - Thread* must already exist */
3026 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3029 if (!bar->initted) {
3030 HG_(record_error_Misc)(
3031 thr, "pthread_barrier_resize: barrier is uninitialised"
3033 return; /* client is broken .. avoid assertions below */
3036 if (!bar->resizable) {
3037 HG_(record_error_Misc)(
3038 thr, "pthread_barrier_resize: barrier is may not be resized"
3040 return; /* client is broken .. avoid assertions below */
3043 if (newcount == 0) {
3044 HG_(record_error_Misc)(
3045 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3047 return; /* client is broken .. avoid assertions below */
3050 /* guaranteed by _INIT_PRE above */
3051 tl_assert(bar->size > 0);
3052 tl_assert(bar->waiting);
3053 /* Guaranteed by this fn */
3054 tl_assert(newcount > 0);
3056 if (newcount >= bar->size) {
3057 /* Increasing the capacity. There's no possibility of threads
3058 moving on from the barrier in this situation, so just note
3059 the fact and do nothing more. */
3060 bar->size = newcount;
3062 /* Decreasing the capacity. If we decrease it to be equal or
3063 below the number of waiting threads, they will now move past
3064 the barrier, so need to mess with dep edges in the same way
3065 as if the barrier had filled up normally. */
3066 present = VG_(sizeXA)(bar->waiting);
3067 tl_assert(present >= 0 && present <= bar->size);
3068 if (newcount <= present) {
3069 bar->size = present; /* keep the cross_sync call happy */
3070 do_barrier_cross_sync_and_empty(bar);
3072 bar->size = newcount;
3077 /* ----------------------------------------------------- */
3078 /* ----- events to do with user-specified HB edges ----- */
3079 /* ----------------------------------------------------- */
3081 /* A mapping from arbitrary UWord tag to the SO associated with it.
3082 The UWord tags are meaningless to us, interpreted only by the
3083 user. */
3088 static WordFM* map_usertag_to_SO = NULL;
3090 static void map_usertag_to_SO_INIT ( void ) {
3091 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3092 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3093 "hg.mutS.1", HG_(free), NULL );
3094 tl_assert(map_usertag_to_SO != NULL);
3098 static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3100 map_usertag_to_SO_INIT();
3101 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3102 tl_assert(key == (UWord)usertag);
3105 SO* so = libhb_so_alloc();
3106 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3111 // If it's ever needed (XXX check before use)
3112 //static void map_usertag_to_SO_delete ( UWord usertag ) {
3113 // UWord keyW, valW;
3114 // map_usertag_to_SO_INIT();
3115 // if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3116 // SO* so = (SO*)valW;
3117 // tl_assert(keyW == usertag);
3119 // libhb_so_dealloc(so);
3125 void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3127 /* TID is just about to notionally send a message on a notional
3128 abstract synchronisation object whose identity is given by
3129 USERTAG. Bind USERTAG to a real SO if it is not already so
3130 bound, and do a 'strong send' on the SO. This is later used by
3131 other thread(s) which successfully 'receive' from the SO,
3132 thereby acquiring a dependency on this signalling event. */
3136 if (SHOW_EVENTS >= 1)
3137 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3138 (Int)tid, usertag );
3140 thr = map_threads_maybe_lookup( tid );
3141 tl_assert(thr); /* cannot fail - Thread* must already exist */
3143 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3146 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
3150 void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3152 /* TID has just notionally received a message from a notional
3153 abstract synchronisation object whose identity is given by
3154 USERTAG. Bind USERTAG to a real SO if it is not already so
3155 bound. If the SO has at some point in the past been 'sent' on,
3156 do a 'strong receive' on it, thereby acquiring a dependency on
3157 the sender. */
3161 if (SHOW_EVENTS >= 1)
3162 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3163 (Int)tid, usertag );
3165 thr = map_threads_maybe_lookup( tid );
3166 tl_assert(thr); /* cannot fail - Thread* must already exist */
3168 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3171 /* Acquire a dependency on it. If the SO has never so far been
3172 sent on, then libhb_so_recv will do nothing. So we're safe
3173 regardless of SO's history. */
3174 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3178 /*--------------------------------------------------------------*/
3179 /*--- Lock acquisition order monitoring ---*/
3180 /*--------------------------------------------------------------*/
3182 /* FIXME: here are some optimisations still to do in
3183 laog__pre_thread_acquires_lock.
3185 The graph is structured so that if L1 --*--> L2 then L1 must be
3188 The common case is that some thread T holds (eg) L1 L2 and L3 and
3189 is repeatedly acquiring and releasing Ln, and there is no ordering
3190 error in what it is doing. Hence it repeatedly:
3192 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3193 produces the answer No (because there is no error).
3195 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3196 (because they already got added the first time T acquired Ln).
3198 Hence cache these two events:
3200 (1) Cache result of the query from last time. Invalidate the cache
3201 any time any edges are added to or deleted from laog.
3203 (2) Cache these add-edge requests and ignore them if said edges
3204 have already been added to laog. Invalidate the cache any time
3205 any edges are deleted from laog. */
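/* A minimal sketch of cache (1), under the assumption of a global
   generation counter bumped by laog__add_edge/laog__del_edge (all
   names here are hypothetical; none of this is implemented yet):

      static UWord laog_generation = 0;  // bump on any edge add/delete
      static struct {
         UWord     generation;  // laog_generation at fill time
         Lock*     lk;          // the query: lk --*--> lset ?
         WordSetID lset;
         Lock*     result;      // cached answer, NULL if no path
      } laog_dfs_cache;

   laog__pre_thread_acquires_lock would return the cached result when
   (generation, lk, lset) all match, and refill the cache after each
   real DFS. */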
3210 WordSetID inns; /* in univ_laog */
3211 WordSetID outs; /* in univ_laog */
3215 /* lock order acquisition graph */
3216 static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3218 /* EXPOSITION ONLY: for each edge in 'laog', record the two places
3219 where that edge was created, so that we can show the user later if we need to. */
3223 Addr src_ga; /* Lock guest addresses for */
3224 Addr dst_ga; /* src/dst of the edge */
3225 ExeContext* src_ec; /* And corresponding places where that */
3226 ExeContext* dst_ec; /* ordering was established */
3230 static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
3231 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3232 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3233 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3234 if (llx1->src_ga < llx2->src_ga) return -1;
3235 if (llx1->src_ga > llx2->src_ga) return 1;
3236 if (llx1->dst_ga < llx2->dst_ga) return -1;
3237 if (llx1->dst_ga > llx2->dst_ga) return 1;
3241 static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3242 /* end EXPOSITION ONLY */
3245 __attribute__((noinline))
3246 static void laog__init ( void )
3249 tl_assert(!laog_exposition);
3251 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3252 HG_(free), NULL/*unboxedcmp*/ );
3254 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3255 cmp_LAOGLinkExposition );
3257 tl_assert(laog_exposition);
3260 static void laog__show ( Char* who ) {
3265 VG_(printf)("laog (requested by %s) {\n", who);
3266 VG_(initIterFM)( laog );
3269 while (VG_(nextIterFM)( laog, (Word*)&me,
3273 VG_(printf)(" node %p:\n", me);
3274 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3275 for (i = 0; i < ws_size; i++)
3276 VG_(printf)(" inn %#lx\n", ws_words[i] );
3277 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3278 for (i = 0; i < ws_size; i++)
3279 VG_(printf)(" out %#lx\n", ws_words[i] );
3283 VG_(doneIterFM)( laog );
3287 __attribute__((noinline))
3288 static void laog__add_edge ( Lock* src, Lock* dst ) {
3291 Bool presentF, presentR;
3292 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3294 /* Take the opportunity to sanity check the graph. Record in
3295 presentF if there is already a src->dst mapping in this node's
3296 forwards links, and presentR if there is already a src->dst
3297 mapping in this node's backwards links. They should agree!
3298 Also, we need to know whether the edge was already present so as
3299 to decide whether or not to update the link details mapping. We
3300 can compute presentF and presentR essentially for free, so may
3301 as well do this always. */
3302 presentF = presentR = False;
3304 /* Update the out edges for src */
3307 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
3310 tl_assert(keyW == (Word)src);
3311 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3312 presentF = outs_new == links->outs;
3313 links->outs = outs_new;
3315 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
3316 links->inns = HG_(emptyWS)( univ_laog );
3317 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
3318 VG_(addToFM)( laog, (Word)src, (Word)links );
3320 /* Update the in edges for dst */
3323 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
3326 tl_assert(keyW == (Word)dst);
3327 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3328 presentR = inns_new == links->inns;
3329 links->inns = inns_new;
3331 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
3332 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3333 links->outs = HG_(emptyWS)( univ_laog );
3334 VG_(addToFM)( laog, (Word)dst, (Word)links );
3337 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3339 if (!presentF && src->acquired_at && dst->acquired_at) {
3340 LAOGLinkExposition expo;
3341 /* If this edge is entering the graph, and we have acquired_at
3342 information for both src and dst, record those acquisition
3343 points. Hence, if there is later a violation of this
3344 ordering, we can show the user the two places in which the
3345 required src-dst ordering was previously established. */
3346 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
3347 src->guestaddr, dst->guestaddr);
3348 expo.src_ga = src->guestaddr;
3349 expo.dst_ga = dst->guestaddr;
3352 tl_assert(laog_exposition);
3353 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
3354 /* we already have it; do nothing */
3356 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3357 sizeof(LAOGLinkExposition));
3358 expo2->src_ga = src->guestaddr;
3359 expo2->dst_ga = dst->guestaddr;
3360 expo2->src_ec = src->acquired_at;
3361 expo2->dst_ec = dst->acquired_at;
3362 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
3367 __attribute__((noinline))
3368 static void laog__del_edge ( Lock* src, Lock* dst ) {
3371 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3372 /* Update the out edges for src */
3375 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
3377 tl_assert(keyW == (Word)src);
3378 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3380 /* Update the in edges for dst */
3383 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
3385 tl_assert(keyW == (Word)dst);
3386 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3390 __attribute__((noinline))
3391 static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3396 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
3398 tl_assert(keyW == (Word)lk);
3399 return links->outs;
3400 } else {
3401 return HG_(emptyWS)( univ_laog );
3405 __attribute__((noinline))
3406 static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3411 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
3413 tl_assert(keyW == (Word)lk);
3414 return links->inns;
3415 } else {
3416 return HG_(emptyWS)( univ_laog );
3420 __attribute__((noinline))
3421 static void laog__sanity_check ( Char* who ) {
3426 if (UNLIKELY(!laog || !laog_exposition))
3428 VG_(initIterFM)( laog );
3431 if (0) VG_(printf)("laog sanity check\n");
3432 while (VG_(nextIterFM)( laog, (Word*)&me,
3436 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3437 for (i = 0; i < ws_size; i++) {
3438 if ( ! HG_(elemWS)( univ_laog,
3439 laog__succs( (Lock*)ws_words[i] ),
3443 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3444 for (i = 0; i < ws_size; i++) {
3445 if ( ! HG_(elemWS)( univ_laog,
3446 laog__preds( (Lock*)ws_words[i] ),
3453 VG_(doneIterFM)( laog );
3457 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3462 /* If there is a path in laog from 'src' to any of the elements in
3463 'dst', return an arbitrarily chosen element of 'dst' reachable from
3464 'src'. If no path exists from 'src' to any element in 'dst', return NULL. */
3466 __attribute__((noinline))
3468 Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3472 XArray* stack; /* of Lock* */
3473 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3478 //laog__sanity_check();
3480 /* If the destination set is empty, we can never get there from
3481 'src' :-), so don't bother to try. */
3482 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3486 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3487 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
3489 (void) VG_(addToXA)( stack, &src );
3493 ssz = VG_(sizeXA)( stack );
3495 if (ssz == 0) { ret = NULL; break; }
3497 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3498 VG_(dropTailXA)( stack, 1 );
3500 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3502 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
3505 VG_(addToFM)( visited, (Word)here, 0 );
3507 succs = laog__succs( here );
3508 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3509 for (i = 0; i < succs_size; i++)
3510 (void) VG_(addToXA)( stack, &succs_words[i] );
3513 VG_(deleteFM)( visited, NULL, NULL );
3514 VG_(deleteXA)( stack );
3519 /* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3520 between 'lk' and the locks already held by 'thr' and issue a
3521 complaint if so. Also, update the ordering graph appropriately. */
3523 __attribute__((noinline))
3524 static void laog__pre_thread_acquires_lock (
3525 Thread* thr, /* NB: BEFORE lock is added */
3533 /* It may be that 'thr' already holds 'lk' and is recursively
3534 relocking it. In this case we just ignore the call. */
3535 /* NB: univ_lsets really is correct here */
3536 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3539 if (UNLIKELY(!laog || !laog_exposition))
3542 /* First, the check. Complain if there is any path in laog from lk
3543 to any of the locks already held by thr, since if any such path
3544 existed, it would mean that previously lk was acquired before
3545 (rather than after, as we are doing here) at least one of those locks. */
3548 other = laog__do_dfs_from_to(lk, thr->locksetA);
3550 LAOGLinkExposition key, *found;
3551 /* So we managed to find a path lk --*--> other in the graph,
3552 which implies that 'lk' should have been acquired before
3553 'other' but is in fact being acquired afterwards. We present
3554 the lk/other arguments to record_error_LockOrder in the order
3555 in which they should have been acquired. */
3556 /* Go look in the laog_exposition mapping, to find the allocation
3557 points for this edge, so we can show the user. */
3558 key.src_ga = lk->guestaddr;
3559 key.dst_ga = other->guestaddr;
3563 if (VG_(lookupFM)( laog_exposition,
3564 (Word*)&found, NULL, (Word)&key )) {
3565 tl_assert(found != &key);
3566 tl_assert(found->src_ga == key.src_ga);
3567 tl_assert(found->dst_ga == key.dst_ga);
3568 tl_assert(found->src_ec);
3569 tl_assert(found->dst_ec);
3570 HG_(record_error_LockOrder)(
3571 thr, lk->guestaddr, other->guestaddr,
3572 found->src_ec, found->dst_ec );
3574 /* Hmm. This can't happen (can it?) */
3575 HG_(record_error_LockOrder)(
3576 thr, lk->guestaddr, other->guestaddr,
3581 /* Second, add to laog the pairs
3582 (old, lk) | old <- locks already held by thr
3583 Since both old and lk are currently held by thr, their acquired_at
3584 fields must be non-NULL. */
3586 tl_assert(lk->acquired_at);
3587 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3588 for (i = 0; i < ls_size; i++) {
3589 Lock* old = (Lock*)ls_words[i];
3590 tl_assert(old->acquired_at);
3591 laog__add_edge( old, lk );
3594 /* Why "except_Locks" ? We're here because a lock is being
3595 acquired by a thread, and we're in an inconsistent state here.
3596 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3597 When called in this inconsistent state, locks__sanity_check duly fails. */
3599 if (HG_(clo_sanity_flags) & SCE_LAOG)
3600 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3604 /* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3606 __attribute__((noinline))
3607 static void laog__handle_one_lock_deletion ( Lock* lk )
3609 WordSetID preds, succs;
3610 Word preds_size, succs_size, i, j;
3611 UWord *preds_words, *succs_words;
3613 if (UNLIKELY(!laog || !laog_exposition))
3616 preds = laog__preds( lk );
3617 succs = laog__succs( lk );
3619 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3620 for (i = 0; i < preds_size; i++)
3621 laog__del_edge( (Lock*)preds_words[i], lk );
3623 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3624 for (j = 0; j < succs_size; j++)
3625 laog__del_edge( lk, (Lock*)succs_words[j] );
3627 for (i = 0; i < preds_size; i++) {
3628 for (j = 0; j < succs_size; j++) {
3629 if (preds_words[i] != succs_words[j]) {
3630 /* This can pass unlocked locks to laog__add_edge, since
3631 we're deleting stuff. So their acquired_at fields may be NULL. */
3633 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3639 //__attribute__((noinline))
3640 //static void laog__handle_lock_deletions (
3641 // WordSetID /* in univ_laog */ locksToDelete
3647 // if (UNLIKELY(!laog || !laog_exposition))
3650 // HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3651 // for (i = 0; i < ws_size; i++)
3652 // laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3654 // if (HG_(clo_sanity_flags) & SCE_LAOG)
3655 // all__sanity_check("laog__handle_lock_deletions-post");
3659 /*--------------------------------------------------------------*/
3660 /*--- Malloc/free replacements ---*/
3661 /*--------------------------------------------------------------*/
3665 void* next; /* required by m_hashtable */
3666 Addr payload; /* ptr to actual block */
3667 SizeT szB; /* size requested */
3668 ExeContext* where; /* where it was allocated */
3669 Thread* thr; /* allocating thread */
3673 /* A hash table of MallocMetas, used to track malloc'd blocks. */
3675 static VgHashTable hg_mallocmeta_table = NULL;
3678 static MallocMeta* new_MallocMeta ( void ) {
3679 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
3683 static void delete_MallocMeta ( MallocMeta* md ) {
3684 HG_(free)(md);
3685 }
3688 /* Allocate a client block and set up the metadata for it. */
3691 void* handle_alloc ( ThreadId tid,
3692 SizeT szB, SizeT alignB, Bool is_zeroed )
3697 tl_assert( ((SSizeT)szB) >= 0 );
3698 p = (Addr)VG_(cli_malloc)(alignB, szB);
3699 if (!p)
3700 return NULL;
3702 if (is_zeroed)
3703 VG_(memset)((void*)p, 0, szB);
3705 /* Note that map_threads_lookup must succeed (cannot assert), since
3706 memory can only be allocated by currently alive threads, hence
3707 they must have an entry in map_threads. */
3708 md = new_MallocMeta();
3711 md->where = VG_(record_ExeContext)( tid, 0 );
3712 md->thr = map_threads_lookup( tid );
3714 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3716 /* Tell the lower level memory wranglers. */
3717 evh__new_mem_heap( p, szB, is_zeroed );
3722 /* Re the checks for less-than-zero (also in hg_cli__realloc below):
3723 Cast to a signed type to catch any unexpectedly negative args.
3724 We're assuming here that the size asked for is not greater than
3725 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit platforms). */
3727 static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3728 if (((SSizeT)n) < 0) return NULL;
3729 return handle_alloc ( tid, n, VG_(clo_alignment),
3730 /*is_zeroed*/False );
3732 static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3733 if (((SSizeT)n) < 0) return NULL;
3734 return handle_alloc ( tid, n, VG_(clo_alignment),
3735 /*is_zeroed*/False );
3737 static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3738 if (((SSizeT)n) < 0) return NULL;
3739 return handle_alloc ( tid, n, VG_(clo_alignment),
3740 /*is_zeroed*/False );
3742 static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3743 if (((SSizeT)n) < 0) return NULL;
3744 return handle_alloc ( tid, n, align,
3745 /*is_zeroed*/False );
3747 static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3748 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3749 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3750 /*is_zeroed*/True );
3754 /* Free a client block, including getting rid of the relevant metadata. */
3757 static void handle_free ( ThreadId tid, void* p )
3759 MallocMeta *md, *old_md;
3760 SizeT szB;
3762 /* First see if we can find the metadata for 'p'. */
3763 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3764 if (!md)
3765 return; /* apparently freeing a bogus address. Oh well. */
3767 tl_assert(md->payload == (Addr)p);
3770 /* Nuke the metadata block */
3771 old_md = (MallocMeta*)
3772 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3773 tl_assert(old_md); /* it must be present - we just found it */
3774 tl_assert(old_md == md);
3775 tl_assert(old_md->payload == (Addr)p);
3777 VG_(cli_free)((void*)old_md->payload);
3778 delete_MallocMeta(old_md);
3780 /* Tell the lower level memory wranglers. */
3781 evh__die_mem_heap( (Addr)p, szB );
3784 static void hg_cli__free ( ThreadId tid, void* p ) {
3785 handle_free(tid, p);
3787 static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3788 handle_free(tid, p);
3790 static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3791 handle_free(tid, p);
3795 static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3797 MallocMeta *md, *md_new, *md_tmp;
3800 Addr payload = (Addr)payloadV;
3802 if (((SSizeT)new_size) < 0) return NULL;
3804 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3805 if (!md)
3806 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3808 tl_assert(md->payload == payload);
3810 if (md->szB == new_size) {
3811 /* size unchanged */
3812 md->where = VG_(record_ExeContext)(tid, 0);
3816 if (md->szB > new_size) {
3817 /* new size is smaller */
3819 md->where = VG_(record_ExeContext)(tid, 0);
3820 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3825 /* new size is bigger */
3826 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3828 /* First half kept and copied, second half new */
3829 // FIXME: shouldn't we use a copier which implements the
3830 // memory state machine?
3831 evh__copy_mem( payload, p_new, md->szB );
3832 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
3834 /* FIXME: can anything funny happen here? specifically, if the
3835 old range contained a lock, then die_mem_heap will complain.
3836 Is that the correct behaviour? Not sure. */
3837 evh__die_mem_heap( payload, md->szB );
3839 /* Copy the actual payload bytes from old to new; evh__copy_mem above only copied the shadow state */
3840 for (i = 0; i < md->szB; i++)
3841 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3843 /* Because the metadata hash table is index by payload address,
3844 we have to get rid of the old hash table entry and make a new
3845 one. We can't just modify the existing metadata in place,
3846 because then it would (almost certainly) be in the wrong hash
3848 md_new = new_MallocMeta();
3851 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3853 tl_assert(md_tmp == md);
3855 VG_(cli_free)((void*)md->payload);
3856 delete_MallocMeta(md);
3859 md_new->where = VG_(record_ExeContext)( tid, 0 );
3860 md_new->szB = new_size;
3861 md_new->payload = p_new;
3862 md_new->thr = map_threads_lookup( tid );
3865 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3867 return (void*)p_new;
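
/* Why the remove/re-add dance matters (illustrative, not built here):
   the table is keyed on the payload address, so after a moving realloc
   the old key must miss and the new key must hit: */
#if 0
static void example_rehash_after_realloc ( ThreadId tid )
{
   void* p1 = hg_cli__malloc ( tid, 100 );
   void* p2 = hg_cli__realloc( tid, p1, 1000 );  /* grows, so block moves */
   if (p2 != p1) {
      tl_assert( NULL == VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p1 ) );
      tl_assert( NULL != VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p2 ) );
   }
}
#endif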

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}

/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      at 'mm's start. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
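
/* Illustrative caller (not built here), of the kind error reporting
   might use: given an access at 'a', describe the enclosing heap
   block, if any.  example_describe_addr is a hypothetical name. */
#if 0
static void example_describe_addr ( Addr a )
{
   ExeContext* where   = NULL;
   Addr        payload = 0;
   SizeT       szB     = 0;
   if (HG_(mm_find_containing_block)( &where, &payload, &szB, a )) {
      VG_(printf)("%#lx is %lu bytes inside a block of size %lu\n",
                  a, (UWord)(a - payload), (UWord)szB);
   } else {
      VG_(printf)("%#lx is not inside any live heap block\n", a);
   }
}
#endif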

/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
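
/* Worked example (illustrative): for a 4-byte store  STle(t1) = t2,
   the code above selects evh__mem_help_cwrite_4 with regparms == 1 and
   argv == { t1 }, and appends to bbOut something like

      DIRTY 1:I1 ::: evh__mem_help_cwrite_4(t1)

   The original store is appended afterwards by the caller (see
   hg_instrument below), so the helper observes the effective address
   just before the access actually happens. */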

/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  Misclassification is one-sidedly safe, since False is the
   safe value. */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const UChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1))            return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1))              return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD))                 return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   IRStmt* st;
   Addr64  cia; /* address of current insn */
   Bool    inLDSO = False;
   Addr64  inLDSOmask4K = 1; /* mismatches on first check */

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up the output BB. */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC: ignore */
            }
            break;
         }

         case Ist_Store:
            /* It seems we pretend that store-conditionals don't
               exist, viz, just ignore them ... */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* ... whereas here we don't care whether a load is a
               vanilla one or a load-linked. */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            /* shouldn't happen */
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
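
/* Worked example (illustrative) of the 4K-page cache in the Ist_IMark
   case above: with cia == 0x4017ACE, cia & ~(Addr64)0xFFF == 0x4017000.
   Every insn address in [0x4017000, 0x4017FFF] yields the same masked
   value, so is_in_dynamic_linker_shared_object is consulted at most
   once per 4K page encountered, not once per instruction. */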

/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}
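
/* The intended round-trip through this map (illustrative, not built
   here): a thread binds its pthread_t on startup, and a joiner later
   resolves the same pthread_t back to a Thread*.  example_map_roundtrip
   is a hypothetical name. */
#if 0
static void example_map_roundtrip ( Thread* thr, Word pthread_t_value )
{
   Word keyW = 0, valW = 0;
   map_pthread_t_to_Thread_INIT();
   VG_(addToFM)( map_pthread_t_to_Thread, pthread_t_value, (Word)thr );
   if (VG_(lookupFM)( map_pthread_t_to_Thread,
                      &keyW, &valW, pthread_t_value )) {
      tl_assert( keyW == pthread_t_value );
      tl_assert( (Thread*)valW == thr );
   }
}
#endif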

static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB    = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
         VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
         VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                     (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
         VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         tl_assert(thr_q);
         if (0)
         VG_(printf)(".................... quitter Thread* = %p\n",
                     (void*)thr_q);
         evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* HChar* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
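
/* Client-side view (illustrative, not built here): these requests are
   normally issued via the wrapper macros in helgrind.h, e.g.
   VALGRIND_HG_CLEAN_MEMORY for VG_USERREQ__HG_CLEAN_MEMORY, and the
   ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros for the
   USERSO_SEND_PRE / USERSO_RECV_POST pair.  Roughly: */
#if 0
#include "helgrind.h"

void example_client_annotations ( void* buf, unsigned long len )
{
   /* Tell Helgrind to forget the access history of [buf, buf+len). */
   VALGRIND_HG_CLEAN_MEMORY(buf, len);
   /* Publish: creates a happens-before edge tagged with 'buf' ... */
   ANNOTATE_HAPPENS_BEFORE(buf);
   /* ... which a consumer thread later completes with:
      ANNOTATE_HAPPENS_AFTER(buf); */
}
#endif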

/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
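
/* Example invocations accepted by the parser above (illustrative):

     valgrind --tool=helgrind --history-level=approx        ./a.out
     valgrind --tool=helgrind --conflict-cache-size=5000000 ./a.out
     valgrind --tool=helgrind --hg-sanity-flags=000010      ./a.out

   The last one sets bit 1 of HG_(clo_sanity_flags) (binary 000010 ==
   0x2), enabling sanity checks at lock/unlock events. */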

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_post_clo_init ( void )
{
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(printf)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(printf)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_laog,  "univ_laog" );

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);
   }

   libhb_shutdown(True);
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   /* Zero-pad any unused tail, so callers see a fixed-size array. */
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}

static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;

   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );
}
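
/* What the "dodgy C-as-C++ inheritance" requirement above amounts to
   (illustrative, not built here): a node type usable with the generic
   hash table must be layout-compatible with VgHashNode, i.e. begin
   with the chain link and then the UWord key, so the table can chain
   and compare nodes without knowing the full type.  Roughly: */
#if 0
typedef
   struct _ExampleMeta {
      struct _ExampleMeta* next;  /* must be first: hash chain link */
      UWord                key;   /* must be second: the lookup key */
      /* ... tool-specific payload fields follow ... */
   }
   ExampleMeta;
#endif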

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/