/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"
// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/
/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
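/* For exposition, the kind of call that trips strict aliasing: WordFM
   keys and values are passed as Word, so pointers get laundered
   through casts like

      Lock* lk    = NULL;
      Bool  found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );

   (a minimal sketch of the pattern used throughout this file, not an
   additional helper).  At -O2, gcc may assume *(Word*)&lk and lk do
   not alias unless -fno-strict-aliasing is given. */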
// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0

static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0

/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.

/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/
/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle memory pools. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
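/* For exposition: locksets are WordSetIDs drawn from univ_lsets, so
   membership updates are value-style, returning a new set ID.  A
   minimal sketch, using only the HG_(...)WS operations this file
   already relies on:

      WordSetID ws = HG_(emptyWS)( univ_lsets );
      ws = HG_(addToWS)( univ_lsets, ws, (Word)lk );
      tl_assert( HG_(elemWS)( univ_lsets, ws, (Word)lk ) );

   thr->locksetA and thr->locksetW below are maintained exactly this
   way. */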
/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   admin_threads        = thread;
   return thread;
}
// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next  = admin_locks;
   lock->admin_prev  = NULL;
   admin_locks       = lock;
   /* end: add */
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)

static const Int sHOW_ADMIN = 0;
static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n",   (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n",   (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   Lock* lk;
   Addr  gla;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                                      (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", (void*)gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}
/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}
/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
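/* For exposition, how the handlers below use this (a sketch,
   mirroring evh__HG_PTHREAD_MUTEX_INIT_POST and the acquire helpers):

      Lock* lk = map_locks_lookup_or_create( LK_nonRec, (Addr)mutex, tid );

   The first event on a guest address allocates the Lock and records
   'appeared_at'; every later event on the same address gets the same
   Lock* back. */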
static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */
/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/
/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}
/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }
   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
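/* For exposition: the caller chain that needs this is the realloc
   implementation.  When a client block moves, e.g.

      p2 = realloc(p, new_sz);   // illustrative client code

   the old block's shadow state is copied over the new block via
   evh__copy_mem (below), which lands here.  The states are copied
   verbatim ('scopy'); no synthetic read or write accesses are fed to
   the MSM for either range. */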
static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
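/* For exposition, the benign pattern that reaches the recursive
   re-lock branch above (illustrative client code, not part of this
   file):

      pthread_mutex_t mx;        // initialised PTHREAD_MUTEX_RECURSIVE
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // ok: lk->kind == LK_mbRec
      pthread_mutex_unlock(&mx);
      pthread_mutex_unlock(&mx);

   The same sequence on a non-recursive mutex would instead be
   diagnosed via the "recursive write lock granted ..." complaint. */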
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/
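/* For exposition, the intended split (a sketch; 'on_client_access' and
   'on_any_event' are illustrative names, the real callers being the
   evh__mem_help_* and evh__new_mem families below):

      static void on_client_access ( Addr a ) {
         // only ever called from generated client code: fast path
         Thread* thr = get_current_Thread_in_C_C();
         ...
      }
      static void on_any_event ( Addr a, SizeT len ) {
         // may fire outside client code, e.g. during startup
         Thread* thr = get_current_Thread();  // falls back to core query
         ...
      }
*/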
static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}

static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}

static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}

static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (thr)
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}
static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests.  If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}
static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this.  If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs.  If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb.  So force them to NoAccess, so that all
   // VTS references in the affected area are dropped.  Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
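/* For exposition, a minimal client-side sketch of the edge created
   above ('shared', 'q_id' and 'use' are illustrative names, not part
   of Helgrind):

      // Thread Q (quitter):        // Thread S (stayer):
      shared = 42;                  pthread_join(q_id, NULL);
      return NULL;                  use(shared);  // no race reported

   The so_send/so_recv pair is what makes Q's write to 'shared'
   happen-before S's read after the join. */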
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (Char*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}
static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_NoFX( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}
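/* For exposition, the race that treating frees as writes shakes out
   (illustrative client code, not part of this file):

      // Thread 1:                  // Thread 2:
      p->field = 1;                 free(p);

   With --free-is-write, the unsynchronised free() above becomes a
   write to the whole block and is reported as racing with Thread 1's
   access, even though no two ordinary accesses overlap. */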
/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}
/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
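/* For exposition, the client-side sequence this intercepts
   (illustrative code, not part of this file):

      pthread_mutex_t mx;
      pthread_mutex_init(&mx, NULL);  // fires ..MUTEX_INIT_POST below
      ...
      pthread_mutex_lock(&mx);        // would otherwise be first sighting

   so error reports can cite the init site ('appeared_at') instead of
   the first lock site. */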
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HChar* errstr = "Attempt to re-lock a "
                      "non-recursive lock I already hold";
      HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
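
/* Illustrative sketch (not part of the tool): the re-lock check above
   fires on client code like the following hypothetical fragment, where
   a thread re-acquires a non-recursive mutex it already write-holds.

      #include <pthread.h>

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;

      void will_self_deadlock ( void )
      {
         pthread_mutex_lock(&mx);
         pthread_mutex_lock(&mx);   // LOCK_PRE sees lk->heldBy/lk->heldW
                                    // already set for this thread: the
                                    // error is reported, and the thread
                                    // then really deadlocks
         pthread_mutex_unlock(&mx);
      }
*/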
static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}
static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}
/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/
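
/* Illustrative sketch (not part of the tool): the send/recv pairing
   described above corresponds to client code like this hypothetical
   fragment.  The signaller's vector clock is sent into the CV's SO;
   the waiter receives it, so the write to 'ready_data' is ordered
   before the read that follows the wait, and no race is reported.

      #include <pthread.h>

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      static int ready = 0;
      static int ready_data;

      void* producer ( void* v ) {
         pthread_mutex_lock(&mx);
         ready_data = 42;            // happens-before the consumer's read
         ready = 1;
         pthread_cond_signal(&cv);   // 'send' on the CV's SO
         pthread_mutex_unlock(&mx);
         return NULL;
      }

      void* consumer ( void* v ) {
         int x;
         pthread_mutex_lock(&mx);
         while (!ready)
            pthread_cond_wait(&cv, &mx);  // on success, 'recv' from the SO
         x = ready_data;                  // ordered after the producer's write
         pthread_mutex_unlock(&mx);
         (void)x;
         return NULL;
      }
*/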
/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;

/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}
static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so    = so;
      cvi->mx_ga = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static void map_cond_to_CVInfo_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   }
}
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock* lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL.  Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else if (lk->heldBy != NULL
                 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}
static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded without prior "
                                   "pthread_cond_signal");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_CVInfo_delete( cond );
}
/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy      = NULL;
         lk->heldW       = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}
static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. */
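
/* Illustrative sketch (not part of the tool): the T1/T2/T3 scenario
   above, as a hypothetical client fragment.  Each sem_post pushes an
   SO carrying the poster's vector clock; each completed sem_wait pops
   one and receives from it, so T3 ends up ordered after both posters
   and its reads of data1/data2 are race-free.

      #include <semaphore.h>

      static sem_t s;            // assume sem_init(&s, 0, 0) ran first
      static int data1, data2;

      void* t1 ( void* v ) { data1 = 1; sem_post(&s); return NULL; }
      void* t2 ( void* v ) { data2 = 2; sem_post(&s); return NULL; }

      void* t3 ( void* v ) {
         sem_wait(&s);           // strong recv from one poster's SO
         sem_wait(&s);           // strong recv from the other poster's SO
         return (void*)(long)(data1 + data2);  // both reads are ordered
      }
*/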
/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}
static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool    initted;   /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size;      /* declared size */
      XArray* waiting;   /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   tl_assert(bar);
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
      tl_assert(map_barrier_to_Bar != NULL);
   }
}

static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }

   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}
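
/* Illustrative sketch (not part of the tool): with the scheme above,
   every write a participant makes before the barrier is visible,
   race-free, to every participant after it.  N and the worker body
   below are hypothetical, chosen only to make that concrete.

      #include <pthread.h>
      #define N 4                    // illustrative participant count

      static pthread_barrier_t b;    // assume pthread_barrier_init(&b, NULL, N)
      static int slot[N];

      void* worker ( void* v ) {
         long me = (long)v, sum = 0, i;
         slot[me] = (int)me + 1;     // write before the barrier
         pthread_barrier_wait(&b);   // all participants' VCs joined here
         for (i = 0; i < N; i++)
            sum += slot[i];          // reading others' writes: no race
         return (void*)sum;
      }
*/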
static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present >= 0 && present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      } else {
         bar->size = newcount;
      }
   }
}
/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */

/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
      tl_assert(map_usertag_to_SO != NULL);
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

static void map_usertag_to_SO_delete ( UWord usertag ) {
   UWord keyW, valW;
   map_usertag_to_SO_INIT();
   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == usertag);
      tl_assert(so);
      libhb_so_dealloc(so);
   }
}
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}
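
/* Illustrative sketch (not part of the tool): these events are
   typically reached via client-side annotations -- for example the
   ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros shipped in
   helgrind.h, assuming the client includes that header -- with the
   address of a user object as the tag:

      int payload;
      volatile int flag = 0;

      // Thread A
      payload = 1;
      ANNOTATE_HAPPENS_BEFORE(&flag);   // weak send on tag &flag
      flag = 1;

      // Thread B
      while (!flag) ;                   // spin on the racy handoff
      ANNOTATE_HAPPENS_AFTER(&flag);    // strong recv on tag &flag
      payload++;                        // now ordered after A's write
*/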
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender(s). */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}
/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
          laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.

   A minimal sketch of idea (1) follows this comment.
*/
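
/* A minimal sketch of idea (1), assuming a single-entry cache and a
   hypothetical global 'laog_generation' counter bumped on every edge
   add/delete.  This is only an illustration of the FIXME above; it is
   not wired in anywhere.

      static UWord     laog_generation = 0;  // bump in add_edge/del_edge
      static UWord     cache_gen  = 0;
      static Lock*     cache_src  = NULL;
      static WordSetID cache_dsts = 0;
      static Lock*     cache_res  = NULL;

      static Lock* cached_dfs_from_to ( Lock* src, WordSetID dsts ) {
         if (cache_gen == laog_generation
             && cache_src == src && cache_dsts == dsts)
            return cache_res;                 // hit: reuse last answer
         cache_gen  = laog_generation;        // miss: recompute and refill
         cache_src  = src;
         cache_dsts = dsts;
         cache_res  = laog__do_dfs_from_to(src, dsts);
         return cache_res;
      }
*/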
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}
static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}
__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   Word      succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word   ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
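
/* Illustrative sketch (not part of the tool): the classic pattern the
   DFS check above catches.  Hypothetical client fragment:

      static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
      static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

      void* t1 ( void* v ) {
         pthread_mutex_lock(&A);    // establishes edge A --> B in laog
         pthread_mutex_lock(&B);
         pthread_mutex_unlock(&B);
         pthread_mutex_unlock(&A);
         return NULL;
      }

      void* t2 ( void* v ) {
         pthread_mutex_lock(&B);
         pthread_mutex_lock(&A);    // DFS finds path A --*--> B while B is
                                    // held: lock order violation reported
         pthread_mutex_unlock(&A);
         pthread_mutex_unlock(&B);
         return NULL;
      }
*/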
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}
/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;

static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}

/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}
/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller.  Release the tail before updating szB,
         so the length passed to die_mem_heap is the true shrinkage. */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}
static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}
3840 /* For error creation: map 'data_addr' to a malloc'd chunk, if any.
3841 Slow linear search. With a bit of hash table help if 'data_addr'
3842 is either the start of a block or up to 15 word-sized steps along
3843 from the start of a block. */
3845 static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3847 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3849 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3851 /* else normal interval rules apply */
3852 if (LIKELY(a < mm->payload)) return False;
3853 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3857 Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
3858 /*OUT*/Addr* payload,
3864 const Int n_fast_check_words = 16;
3866 /* First, do a few fast searches on the basis that data_addr might
3867 be exactly the start of a block or up to 15 words inside. This
3868 can happen commonly via the creq
3869 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3870 for (i = 0; i < n_fast_check_words; i++) {
3871 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3872 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3873 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3874 goto found;
3875 }
3877 /* Well, this totally sucks. But without using an interval tree or
3878 some such, it's hard to see how to do better. We have to check
3879 every block in the entire table. */
3880 VG_(HT_ResetIter)(hg_mallocmeta_table);
3881 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
3882 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3883 goto found;
3884 }
3886 /* Not found. Bah. */
3887 return False;
3888 /*NOTREACHED*/
3890 found:
3891 tl_assert(mm);
3892 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3893 if (where) *where = mm->where;
3894 if (payload) *payload = mm->payload;
3895 if (szB) *szB = mm->szB;
3896 return True;
3897 }
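/* Usage sketch (illustrative only): error-reporting code can describe
an arbitrary address 'a' along these lines:

ExeContext* where; Addr payload; SizeT szB;
if (HG_(mm_find_containing_block)(&where, &payload, &szB, a))
... report "'a' is inside a block of szB bytes allocated at 'where'" ...

Any OUT parameter may be passed as NULL if the caller does not need it,
as the guarded assignments above show. */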
3900 /*--------------------------------------------------------------*/
3901 /*--- Instrumentation ---*/
3902 /*--------------------------------------------------------------*/
3904 static void instrument_mem_access ( IRSB* bbOut,
3905 IRExpr* addr,
3906 Int szB,
3907 Bool isStore,
3908 Int hWordTy_szB )
3909 {
3910 IRType tyAddr = Ity_INVALID;
3911 HChar* hName = NULL;
3912 void* hAddr = NULL;
3913 Int regparms = 0;
3914 IRExpr** argv = NULL;
3915 IRDirty* di = NULL;
3917 tl_assert(isIRAtom(addr));
3918 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3920 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3921 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3923 /* So the effective address is in 'addr' now. */
3924 regparms = 1; // unless stated otherwise
3925 if (isStore) {
3926 switch (szB) {
3927 case 1:
3928 hName = "evh__mem_help_cwrite_1";
3929 hAddr = &evh__mem_help_cwrite_1;
3930 argv = mkIRExprVec_1( addr );
3931 break;
3932 case 2:
3933 hName = "evh__mem_help_cwrite_2";
3934 hAddr = &evh__mem_help_cwrite_2;
3935 argv = mkIRExprVec_1( addr );
3936 break;
3937 case 4:
3938 hName = "evh__mem_help_cwrite_4";
3939 hAddr = &evh__mem_help_cwrite_4;
3940 argv = mkIRExprVec_1( addr );
3941 break;
3942 case 8:
3943 hName = "evh__mem_help_cwrite_8";
3944 hAddr = &evh__mem_help_cwrite_8;
3945 argv = mkIRExprVec_1( addr );
3946 break;
3947 default:
3948 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3949 regparms = 2;
3950 hName = "evh__mem_help_cwrite_N";
3951 hAddr = &evh__mem_help_cwrite_N;
3952 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3953 break;
3954 }
3955 } else {
3956 switch (szB) {
3957 case 1:
3958 hName = "evh__mem_help_cread_1";
3959 hAddr = &evh__mem_help_cread_1;
3960 argv = mkIRExprVec_1( addr );
3961 break;
3962 case 2:
3963 hName = "evh__mem_help_cread_2";
3964 hAddr = &evh__mem_help_cread_2;
3965 argv = mkIRExprVec_1( addr );
3966 break;
3967 case 4:
3968 hName = "evh__mem_help_cread_4";
3969 hAddr = &evh__mem_help_cread_4;
3970 argv = mkIRExprVec_1( addr );
3971 break;
3972 case 8:
3973 hName = "evh__mem_help_cread_8";
3974 hAddr = &evh__mem_help_cread_8;
3975 argv = mkIRExprVec_1( addr );
3976 break;
3977 default:
3978 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3979 regparms = 2;
3980 hName = "evh__mem_help_cread_N";
3981 hAddr = &evh__mem_help_cread_N;
3982 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3983 break;
3984 }
3985 }
3987 /* Add the helper. */
3988 tl_assert(hName);
3989 tl_assert(hAddr);
3990 tl_assert(argv);
3991 di = unsafeIRDirty_0_N( regparms,
3992 hName, VG_(fnptr_to_fnentry)( hAddr ),
3993 argv );
3994 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3995 }
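/* Result sketch (assumption, for a 4-byte client store on a 64-bit
host): the original statement STle(t1) = t2 ends up preceded in
bbOut by a dirty call along the lines of

DIRTY 1:I1 ::: evh__mem_help_cwrite_4[rp=1]{...}(t1)

i.e. an unconditional helper call taking just the effective address;
only the _N variants get the access size as a second argument. */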
3998 /* Figure out if GA is a guest code address in the dynamic linker, and
3999 if so return True. Otherwise (and in case of any doubt) return
4000 False; False is the fail-safe answer in case of doubt. */
4001 static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4002 {
4003 DebugInfo* dinfo;
4004 const UChar* soname;
4005 if (0) return False;
4007 dinfo = VG_(find_DebugInfo)( (Addr)ga );
4008 if (!dinfo) return False;
4010 soname = VG_(DebugInfo_get_soname)(dinfo);
4011 tl_assert(soname);
4012 if (0) VG_(printf)("%s\n", soname);
4014 # if defined(VGO_linux)
4015 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
4016 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4017 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4018 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4019 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4020 # elif defined(VGO_darwin)
4021 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4022 # else
4023 # error "Unsupported OS"
4024 # endif
4025 return False;
4026 }
4029 IRSB* hg_instrument ( VgCallbackClosure* closure,
4030 IRSB* bbIn,
4031 VexGuestLayout* layout,
4032 VexGuestExtents* vge,
4033 IRType gWordTy, IRType hWordTy )
4034 {
4035 Int i;
4036 IRSB* bbOut;
4037 Addr64 cia; /* address of current insn */
4038 IRStmt* st;
4039 Bool inLDSO = False;
4040 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
4042 if (gWordTy != hWordTy) {
4043 /* We don't currently support this case. */
4044 VG_(tool_panic)("host/guest word size mismatch");
4045 }
4047 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4048 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4049 }
4052 bbOut = emptyIRSB();
4053 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4054 bbOut->next = deepCopyIRExpr(bbIn->next);
4055 bbOut->jumpkind = bbIn->jumpkind;
4057 // Copy verbatim any IR preamble preceding the first IMark
4058 i = 0;
4059 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4060 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4061 i++;
4062 }
4064 // Get the first statement, and initial cia from it
4065 tl_assert(bbIn->stmts_used > 0);
4066 tl_assert(i < bbIn->stmts_used);
4067 st = bbIn->stmts[i];
4068 tl_assert(Ist_IMark == st->tag);
4069 cia = st->Ist.IMark.addr;
4072 for (/*use current i*/; i < bbIn->stmts_used; i++) {
4073 st = bbIn->stmts[i];
4074 tl_assert(st);
4075 tl_assert(isFlatIRStmt(st));
4076 switch (st->tag) {
4077 case Ist_NoOp:
4078 case Ist_AbiHint:
4079 case Ist_Put:
4080 case Ist_PutI:
4081 case Ist_Exit:
4082 /* None of these can contain any memory references. */
4083 break;
4085 case Ist_IMark:
4086 /* no mem refs, but note the insn address. */
4087 cia = st->Ist.IMark.addr;
4088 /* Don't instrument the dynamic linker. It generates a
4089 lot of races which we just expensively suppress, so
4090 it's pointless.
4092 Avoid flooding is_in_dynamic_linker_shared_object with
4093 requests by only checking at transitions between 4K
4094 pages. */
4095 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4096 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4097 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4098 inLDSO = is_in_dynamic_linker_shared_object(cia);
4099 } else {
4100 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4101 }
4102 break;
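/* Worked example (illustrative only): for cia = 0x4A2F123 the mask
gives inLDSOmask4K = 0x4A2F000, so the (expensive) soname lookup
runs only for the first insn executed in each 4K page; all later
insns in that page reuse the cached inLDSO verdict. */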
4104 case Ist_MBE:
4105 switch (st->Ist.MBE.event) {
4106 case Imbe_Fence:
4107 break; /* not interesting */
4108 default:
4109 goto unhandled;
4110 }
4111 break;
4113 case Ist_CAS: {
4114 /* Atomic read-modify-write cycle. Just pretend it's a
4115 read. */
4116 IRCAS* cas = st->Ist.CAS.details;
4117 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4118 if (isDCAS) {
4119 tl_assert(cas->expdHi);
4120 tl_assert(cas->dataHi);
4121 } else {
4122 tl_assert(!cas->expdHi);
4123 tl_assert(!cas->dataHi);
4124 }
4125 /* Just be boring about it. */
4126 if (!inLDSO) {
4127 instrument_mem_access(
4128 bbOut,
4129 cas->addr,
4130 (isDCAS ? 2 : 1)
4131 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4132 False/*!isStore*/,
4133 sizeofIRType(hWordTy)
4134 );
4135 }
4136 break;
4137 }
4139 case Ist_LLSC: {
4140 /* We pretend store-conditionals don't exist, viz, ignore
4141 them. Whereas load-linked's are treated the same as
4142 normal loads. */
4143 IRType dataTy;
4144 if (st->Ist.LLSC.storedata == NULL) {
4145 /* LL */
4146 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
4147 if (!inLDSO) {
4148 instrument_mem_access(
4149 bbOut,
4150 st->Ist.LLSC.addr,
4151 sizeofIRType(dataTy),
4152 False/*!isStore*/,
4153 sizeofIRType(hWordTy)
4154 );
4155 }
4156 } else {
4157 /* SC: ignore */
4158 }
4159 break;
4160 }
4163 case Ist_Store:
4164 /* It seems we pretend that store-conditionals don't
4165 exist, viz, just ignore them ... */
4166 if (!inLDSO) {
4167 instrument_mem_access(
4168 bbOut,
4169 st->Ist.Store.addr,
4170 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4171 True/*isStore*/,
4172 sizeofIRType(hWordTy)
4173 );
4174 }
4175 break;
4177 case Ist_WrTmp: {
4178 /* ... whereas here we don't care whether a load is a
4179 vanilla one or a load-linked. */
4180 IRExpr* data = st->Ist.WrTmp.data;
4181 if (data->tag == Iex_Load) {
4182 if (!inLDSO) {
4183 instrument_mem_access(
4184 bbOut,
4185 data->Iex.Load.addr,
4186 sizeofIRType(data->Iex.Load.ty),
4187 False/*!isStore*/,
4188 sizeofIRType(hWordTy)
4189 );
4190 }
4191 }
4192 break;
4193 }
4195 case Ist_Dirty: {
4196 Int dataSize;
4197 IRDirty* d = st->Ist.Dirty.details;
4198 if (d->mFx != Ifx_None) {
4199 /* This dirty helper accesses memory. Collect the
4200 details. */
4201 tl_assert(d->mAddr != NULL);
4202 tl_assert(d->mSize != 0);
4203 dataSize = d->mSize;
4204 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
4205 if (!inLDSO) {
4206 instrument_mem_access(
4207 bbOut, d->mAddr, dataSize, False/*!isStore*/,
4208 sizeofIRType(hWordTy)
4209 );
4210 }
4211 }
4212 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
4213 if (!inLDSO) {
4214 instrument_mem_access(
4215 bbOut, d->mAddr, dataSize, True/*isStore*/,
4216 sizeofIRType(hWordTy)
4217 );
4218 }
4219 }
4220 } else {
4221 tl_assert(d->mAddr == NULL);
4222 tl_assert(d->mSize == 0);
4223 }
4224 break;
4225 }
4227 default:
4228 unhandled:
4229 /* shouldn't happen */
4230 tl_assert(0);
4232 } /* switch (st->tag) */
4234 addStmtToIRSB( bbOut, st );
4235 } /* iterate over bbIn->stmts */
4237 return bbOut;
4238 }
4241 /*----------------------------------------------------------------*/
4242 /*--- Client requests ---*/
4243 /*----------------------------------------------------------------*/
4245 /* Sheesh. Yet another goddam finite map. */
4246 static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4248 static void map_pthread_t_to_Thread_INIT ( void ) {
4249 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
4250 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4251 HG_(free), NULL );
4252 tl_assert(map_pthread_t_to_Thread != NULL);
4253 }
4254 }
4257 static
4258 Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4259 {
4260 if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
4261 return False;
4263 /* Anything that gets past the above check is one of ours, so we
4264 should be able to handle it. */
4266 /* default, meaningless return value, unless otherwise set */
4267 *ret = 0;
4269 switch (args[0]) {
4271 /* --- --- User-visible client requests --- --- */
4273 case VG_USERREQ__HG_CLEAN_MEMORY:
4274 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
4275 args[1], args[2]);
4276 /* Call die_mem to (expensively) tidy up properly, if there
4277 are any held locks etc in the area. Calling evh__die_mem
4278 and then evh__new_mem is a bit inefficient; probably just
4279 the latter would do. */
4280 if (args[2] > 0) { /* length */
4281 evh__die_mem(args[1], args[2]);
4282 /* and then set it to New */
4283 evh__new_mem(args[1], args[2]);
4284 }
4285 break;
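/* Client-side sketch (assuming the client includes helgrind.h):

VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);

arrives here with args[1] = (UWord)buf and args[2] = sizeof buf, and
resets the range's shadow state to New, as if freshly allocated. */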
4287 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4288 Addr payload = 0;
4289 SizeT pszB = 0;
4290 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4291 args[1]);
4292 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4293 if (pszB > 0) {
4294 evh__die_mem(payload, pszB);
4295 evh__new_mem(payload, pszB);
4296 }
4297 *ret = pszB;
4298 } else {
4299 *ret = (UWord)-1;
4300 }
4301 break;
4302 }
4304 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4305 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4306 args[1], args[2]);
4307 if (args[2] > 0) { /* length */
4308 evh__untrack_mem(args[1], args[2]);
4309 }
4310 break;
4312 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4313 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4314 args[1], args[2]);
4315 if (args[2] > 0) { /* length */
4316 evh__new_mem(args[1], args[2]);
4317 }
4318 break;
4320 /* --- --- Client requests for Helgrind's use only --- --- */
4322 /* Some thread is telling us its pthread_t value. Record the
4323 binding between that and the associated Thread*, so we can
4324 later find the Thread* again when notified of a join by the
4325 thread. */
4326 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4327 Thread* my_thr = NULL;
4328 if (0)
4329 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4330 (void*)args[1]);
4331 map_pthread_t_to_Thread_INIT();
4332 my_thr = map_threads_maybe_lookup( tid );
4333 /* This assertion should hold because the map_threads (tid to
4334 Thread*) binding should have been made at the point of
4335 low-level creation of this thread, which should have
4336 happened prior to us getting this client request for it.
4337 That's because this client request is sent from
4338 client-world from the 'thread_wrapper' function, which
4339 only runs once the thread has been low-level created. */
4340 tl_assert(my_thr != NULL);
4341 /* So now we know that (pthread_t)args[1] is associated with
4342 (Thread*)my_thr. Note that down. */
4343 if (0)
4344 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4345 (void*)args[1], (void*)my_thr );
4346 VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
4347 break;
4348 }
4350 case _VG_USERREQ__HG_PTH_API_ERROR: {
4351 Thread* my_thr = NULL;
4352 map_pthread_t_to_Thread_INIT();
4353 my_thr = map_threads_maybe_lookup( tid );
4354 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
4355 HG_(record_error_PthAPIerror)(
4356 my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
4357 break;
4358 }
4360 /* This thread (tid) has completed a join with the quitting
4361 thread whose pthread_t is in args[1]. */
4362 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4363 Thread* thr_q = NULL; /* quitter Thread* */
4364 Bool found = False;
4365 if (0)
4366 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4367 (void*)args[1]);
4368 map_pthread_t_to_Thread_INIT();
4369 found = VG_(lookupFM)( map_pthread_t_to_Thread,
4370 NULL, (Word*)&thr_q, (Word)args[1] );
4371 /* Can this fail? It would mean that our pthread_join
4372 wrapper observed a successful join on args[1] yet that
4373 thread never existed (or at least, it never lodged an
4374 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4375 sounds like a bug in the threads library. */
4376 // FIXME: get rid of this assertion; handle properly
4377 tl_assert(found);
4379 if (0)
4380 VG_(printf)(".................... quitter Thread* = %p\n",
4381 (void*)thr_q);
4382 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4383 break;
4384 }
4387 /* EXPOSITION only: by intercepting lock init events we can show
4388 the user where the lock was initialised, rather than only
4389 being able to show where it was first locked. Intercepting
4390 lock initialisations is not necessary for the basic operation
4391 of the race checker. */
4392 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4393 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4394 break;
4396 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
4397 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
4398 break;
4400 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4401 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4402 break;
4404 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4405 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4406 break;
4408 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4409 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4410 break;
4412 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4413 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4414 break;
4416 /* This thread is about to do pthread_cond_signal on the
4417 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4418 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4419 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4420 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4421 break;
4423 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4424 Returns a flag indicating whether or not the mutex is believed to be
4425 valid for this operation. */
4426 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4427 Bool mutex_is_valid
4428 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4429 (void*)args[2] );
4430 *ret = mutex_is_valid ? 1 : 0;
4431 break;
4432 }
4435 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
4436 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
4437 break;
4439 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
4441 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
4442 evh__HG_PTHREAD_COND_WAIT_POST( tid,
4443 (void*)args[1], (void*)args[2] );
4444 break;
4446 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
4447 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
4448 break;
4450 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
4451 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
4452 break;
4454 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
4455 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
4456 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
4457 args[2], args[3] );
4458 break;
4460 /* rwlock=arg[1], isW=arg[2] */
4461 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
4462 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
4463 break;
4465 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
4466 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
4467 break;
4469 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
4470 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
4471 break;
4473 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
4474 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
4475 break;
4477 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
4478 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
4479 break;
4481 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
4482 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
4483 break;
4485 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
4486 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
4487 break;
4489 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
4490 /* pth_bar_t*, ulong count, ulong resizable */
4491 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
4492 args[2], args[3] );
4493 break;
4495 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
4496 /* pth_bar_t*, ulong newcount */
4497 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
4498 args[2] );
4499 break;
4501 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
4502 /* pth_bar_t* */
4503 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
4504 break;
4506 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
4507 /* pth_bar_t* */
4508 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
4509 break;
4511 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
4512 /* pth_spinlock_t* */
4513 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
4514 break;
4516 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
4517 /* pth_spinlock_t* */
4518 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
4519 break;
4521 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
4522 /* pth_spinlock_t*, Word */
4523 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
4524 break;
4526 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
4527 /* pth_spinlock_t* */
4528 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
4529 break;
4531 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
4532 /* pth_spinlock_t* */
4533 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
4534 break;
4536 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
4537 /* HChar* who */
4538 HChar* who = (HChar*)args[1];
4539 HChar buf[50 + 50];
4540 Thread* thr = map_threads_maybe_lookup( tid );
4541 tl_assert( thr ); /* I must be mapped */
4542 tl_assert( who );
4543 tl_assert( VG_(strlen)(who) <= 50 );
4544 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
4545 /* record_error_Misc strdup's buf, so this is safe: */
4546 HG_(record_error_Misc)( thr, buf );
4547 break;
4548 }
4550 case _VG_USERREQ__HG_USERSO_SEND_PRE:
4551 /* UWord arbitrary-SO-tag */
4552 evh__HG_USERSO_SEND_PRE( tid, args[1] );
4553 break;
4555 case _VG_USERREQ__HG_USERSO_RECV_POST:
4556 /* UWord arbitrary-SO-tag */
4557 evh__HG_USERSO_RECV_POST( tid, args[1] );
4558 break;
4560 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
4561 /* UWord arbitrary-SO-tag */
4562 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
4563 break;
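/* Client-side sketch: helgrind.h's ANNOTATE_HAPPENS_BEFORE(obj) and
ANNOTATE_HAPPENS_AFTER(obj) are believed to map to the SEND_PRE and
RECV_POST requests above, with (UWord)obj serving as the arbitrary
SO tag that pairs the two ends of the happens-before edge. */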
4565 default:
4566 /* Unhandled Helgrind client request! */
4567 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
4568 args[0]);
4569 }
4571 return True;
4572 }
4575 /*----------------------------------------------------------------*/
4576 /*--- Setup ---*/
4577 /*----------------------------------------------------------------*/
4579 static Bool hg_process_cmd_line_option ( Char* arg )
4580 {
4581 Char* tmp_str;
4583 if VG_BOOL_CLO(arg, "--track-lockorders",
4584 HG_(clo_track_lockorders)) {}
4585 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
4586 HG_(clo_cmp_race_err_addrs)) {}
4588 else if VG_XACT_CLO(arg, "--history-level=none",
4589 HG_(clo_history_level), 0);
4590 else if VG_XACT_CLO(arg, "--history-level=approx",
4591 HG_(clo_history_level), 1);
4592 else if VG_XACT_CLO(arg, "--history-level=full",
4593 HG_(clo_history_level), 2);
4595 /* If you change the 10k/30mill limits, remember to also change
4596 them in assertions at the top of event_map_maybe_GC. */
4597 else if VG_BINT_CLO(arg, "--conflict-cache-size",
4598 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
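/* e.g. --conflict-cache-size=5000000 is accepted, while values outside
[10000, 30000000] are rejected by the VG_BINT_CLO bounds check. */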
4600 /* "stuvwx" --> stuvwx (binary) */
4601 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
4602 Int j;
4604 if (6 != VG_(strlen)(tmp_str)) {
4605 VG_(message)(Vg_UserMsg,
4606 "--hg-sanity-flags argument must have 6 digits\n");
4607 return False;
4608 }
4609 for (j = 0; j < 6; j++) {
4610 if ('0' == tmp_str[j]) { /* do nothing */ }
4611 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
4612 else {
4613 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
4614 "only contain 0s and 1s\n");
4615 return False;
4616 }
4617 }
4618 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
4619 }
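/* Worked example (illustrative): --hg-sanity-flags=010101 sets bits
4, 2 and 0, i.e. HG_(clo_sanity_flags) == 0x15: check after
lock-order-graph changes, at big-range permission settings, and at
thread create/join events. */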
4621 else if VG_BOOL_CLO(arg, "--free-is-write",
4622 HG_(clo_free_is_write)) {}
4623 else
4624 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4626 return True;
4627 }
4629 static void hg_print_usage ( void )
4630 {
4631 VG_(printf)(
4632 " --free-is-write=no|yes treat heap frees as writes [no]\n"
4633 " --track-lockorders=no|yes show lock ordering errors? [yes]\n"
4634 " --history-level=none|approx|full [full]\n"
4635 " full: show both stack traces for a data race (can be very slow)\n"
4636 " approx: full trace for one thread, approx for the other (faster)\n"
4637 " none: only show trace for one thread in a race (fastest)\n"
4638 " --conflict-cache-size=N size of 'full' history cache [1000000]\n"
4639 );
4640 }
4642 static void hg_print_debug_usage ( void )
4643 {
4644 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
4645 "race errors significant? [no]\n");
4646 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
4647 " at events (X = 0|1) [000000]\n");
4648 VG_(printf)(" --hg-sanity-flags values:\n");
4649 VG_(printf)(" 010000 after changes to "
4650 "lock-order-acquisition-graph\n");
4651 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
4652 VG_(printf)(" 000100 at mem permission setting for "
4653 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
4654 VG_(printf)(" 000010 at lock/unlock events\n");
4655 VG_(printf)(" 000001 at thread create/join events\n");
4656 }
4658 static void hg_fini ( Int exitcode )
4659 {
4660 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4661 VG_(message)(Vg_UserMsg,
4662 "For counts of detected and suppressed errors, "
4663 "rerun with: -v\n");
4664 }
4666 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
4667 && HG_(clo_history_level) >= 2) {
4668 VG_(message)(Vg_UserMsg,
4669 "Use --history-level=approx or =none to gain increased speed, at\n" );
4670 VG_(message)(Vg_UserMsg,
4671 "the cost of reduced accuracy of conflicting-access information\n");
4672 }
4674 if (SHOW_DATA_STRUCTURES)
4675 pp_everything( PP_ALL, "SK_(fini)" );
4676 if (HG_(clo_sanity_flags))
4677 all__sanity_check("SK_(fini)");
4679 if (VG_(clo_stats)) {
4683 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
4684 if (HG_(clo_track_lockorders)) {
4686 HG_(ppWSUstats)( univ_laog, "univ_laog" );
4687 }
4690 //zz VG_(printf)("\n");
4691 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
4692 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
4693 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
4694 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
4695 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
4696 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
4697 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
4698 //zz stats__hbefore_stk_hwm);
4699 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
4700 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
4703 VG_(printf)(" locksets: %'8d unique lock sets\n",
4704 (Int)HG_(cardinalityWSU)( univ_lsets ));
4705 if (HG_(clo_track_lockorders)) {
4706 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
4707 (Int)HG_(cardinalityWSU)( univ_laog ));
4708 }
4710 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
4711 // stats__ga_LL_adds,
4712 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
4714 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
4715 HG_(stats__LockN_to_P_queries),
4716 HG_(stats__LockN_to_P_get_map_size)() );
4718 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
4719 HG_(stats__string_table_queries),
4720 HG_(stats__string_table_get_map_size)() );
4721 if (HG_(clo_track_lockorders)) {
4722 VG_(printf)(" LAOG: %'8d map size\n",
4723 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
4724 VG_(printf)(" LAOG exposition: %'8d map size\n",
4725 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
4726 }
4728 VG_(printf)(" locks: %'8lu acquires, "
4729 "%'lu releases\n",
4730 stats__lockN_acquires,
4731 stats__lockN_releases
4732 );
4733 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
4734 }
4736 libhb_shutdown(True);
4737 }
4740 /* FIXME: move these somewhere sane */
4742 static
4743 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4744 {
4745 Thread* thr;
4746 ThreadId tid;
4747 UWord nActual;
4748 tl_assert(hbt);
4749 thr = libhb_get_Thr_hgthread( hbt );
4750 tl_assert(thr);
4751 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4752 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4753 NULL, NULL, 0 );
4754 tl_assert(nActual <= nRequest);
4755 for (; nActual < nRequest; nActual++)
4756 frames[nActual] = 0;
4757 }
4759 static
4760 ExeContext* for_libhb__get_EC ( Thr* hbt )
4761 {
4762 Thread* thr;
4763 ThreadId tid;
4764 ExeContext* ec;
4765 tl_assert(hbt);
4766 thr = libhb_get_Thr_hgthread( hbt );
4767 tl_assert(thr);
4768 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4769 /* this will assert if tid is invalid */
4770 ec = VG_(record_ExeContext)( tid, 0 );
4771 return ec;
4772 }
4775 static void hg_post_clo_init ( void )
4776 {
4777 Thr* hbthr_root;
4779 /////////////////////////////////////////////
4780 hbthr_root = libhb_init( for_libhb__get_stacktrace,
4781 for_libhb__get_EC );
4782 /////////////////////////////////////////////
4785 if (HG_(clo_track_lockorders))
4786 laog__init();
4788 initialise_data_structures(hbthr_root);
4789 }
4791 static void hg_pre_clo_init ( void )
4792 {
4793 VG_(details_name) ("Helgrind");
4794 VG_(details_version) (NULL);
4795 VG_(details_description) ("a thread error detector");
4796 VG_(details_copyright_author)(
4797 "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
4798 VG_(details_bug_reports_to) (VG_BUGS_TO);
4799 VG_(details_avg_translation_sizeB) ( 320 );
4801 VG_(basic_tool_funcs) (hg_post_clo_init,
4802 hg_instrument,
4803 hg_fini);
4805 VG_(needs_core_errors) ();
4806 VG_(needs_tool_errors) (HG_(eq_Error),
4807 HG_(before_pp_Error),
4808 HG_(pp_Error),
4809 False,/*show TIDs for errors*/
4810 HG_(update_extra),
4811 HG_(recognised_suppression),
4812 HG_(read_extra_suppression_info),
4813 HG_(error_matches_suppression),
4814 HG_(get_error_name),
4815 HG_(get_extra_suppression_info));
4817 VG_(needs_xml_output) ();
4819 VG_(needs_command_line_options)(hg_process_cmd_line_option,
4820 hg_print_usage,
4821 hg_print_debug_usage);
4822 VG_(needs_client_requests) (hg_handle_client_request);
4825 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
4826 // hg_expensive_sanity_check);
4828 VG_(needs_malloc_replacement) (hg_cli__malloc,
4829 hg_cli____builtin_new,
4830 hg_cli____builtin_vec_new,
4831 hg_cli__calloc,
4832 hg_cli__free,
4834 hg_cli____builtin_delete,
4835 hg_cli____builtin_vec_delete,
4836 hg_cli__realloc,
4837 hg_cli_malloc_usable_size,
4838 HG_CLI__MALLOC_REDZONE_SZB );
4840 /* 21 Dec 08: disabled this; it mostly causes H to start more
4841 slowly and use significantly more memory, without very often
4842 providing useful results. The user can request to load this
4843 information manually with --read-var-info=yes. */
4844 if (0) VG_(needs_var_info)(); /* optional */
4846 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
4847 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
4848 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
4849 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
4850 VG_(track_new_mem_stack) ( evh__new_mem_stack );
4852 // FIXME: surely this isn't thread-aware
4853 VG_(track_copy_mem_remap) ( evh__copy_mem );
4855 VG_(track_change_mem_mprotect) ( evh__set_perms );
4857 VG_(track_die_mem_stack_signal)( evh__die_mem );
4858 VG_(track_die_mem_brk) ( evh__die_mem_munmap );
4859 VG_(track_die_mem_munmap) ( evh__die_mem_munmap );
4860 VG_(track_die_mem_stack) ( evh__die_mem );
4862 // FIXME: what is this for?
4863 VG_(track_ban_mem_stack) (NULL);
4865 VG_(track_pre_mem_read) ( evh__pre_mem_read );
4866 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
4867 VG_(track_pre_mem_write) ( evh__pre_mem_write );
4868 VG_(track_post_mem_write) (NULL);
4872 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
4873 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
4875 VG_(track_start_client_code)( evh__start_client_code );
4876 VG_(track_stop_client_code)( evh__stop_client_code );
4878 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
4879 as described in comments at the top of pub_tool_hashtable.h, are
4880 met. */
4881 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
4882 tl_assert( sizeof(UWord) == sizeof(Addr) );
4883 hg_mallocmeta_table
4884 = VG_(HT_construct)( "hg_malloc_metadata_table" );
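/* Layout sketch (assumption, per the pub_tool_hashtable.h contract):
MallocMeta's leading fields must mirror VgHashNode, roughly

struct _MallocMeta {
struct _VgHashNode* next; // hash-chain link, owned by the table
UWord key; // the payload address
... tool-specific fields ...
};

which is what makes casts like (VgHashNode*)md_new in hg_cli__realloc
above legitimate, and why the two tl_asserts just above must hold. */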
4886 // add a callback to clean up on (threaded) fork.
4887 VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
4888 }
4890 VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
4892 /*--------------------------------------------------------------------*/
4893 /*--- end hg_main.c ---*/
4894 /*--------------------------------------------------------------------*/