1 /* -*- mode: C; c-basic-offset: 3; indent-tabs-mode: nil; -*- */
3 This file is part of drd, a thread error detector.
5 Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>.
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
12 This program is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
22 The GNU General Public License is contained in the file COPYING.
26 #include "drd_error.h"
27 #include "drd_barrier.h"
28 #include "drd_clientobj.h"
30 #include "drd_mutex.h"
31 #include "drd_segment.h"
32 #include "drd_semaphore.h"
33 #include "drd_suppression.h"
34 #include "drd_thread.h"
35 #include "pub_tool_vki.h"
36 #include "pub_tool_basics.h" // Addr, SizeT
37 #include "pub_tool_libcassert.h" // tl_assert()
38 #include "pub_tool_libcbase.h" // VG_(strlen)()
39 #include "pub_tool_libcprint.h" // VG_(printf)()
40 #include "pub_tool_libcproc.h" // VG_(getenv)()
41 #include "pub_tool_machine.h"
42 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
43 #include "pub_tool_options.h" // VG_(clo_backtrace_size)
44 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
48 /* Local functions. */
50 static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
51 static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
52 static void thread_compute_conflict_set(struct bitmap** conflict_set,
53 const DrdThreadId tid);
54 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
57 /* Local variables. */
59 static ULong s_context_switch_count;
60 static ULong s_discard_ordered_segments_count;
61 static ULong s_compute_conflict_set_count;
62 static ULong s_update_conflict_set_count;
63 static ULong s_update_conflict_set_new_sg_count;
64 static ULong s_update_conflict_set_sync_count;
65 static ULong s_update_conflict_set_join_count;
66 static ULong s_conflict_set_bitmap_creation_count;
67 static ULong s_conflict_set_bitmap2_creation_count;
68 static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
69 DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
70 ThreadInfo DRD_(g_threadinfo)[DRD_N_THREADS];
71 struct bitmap* DRD_(g_conflict_set);
72 static Bool s_trace_context_switches = False;
73 static Bool s_trace_conflict_set = False;
74 static Bool s_trace_conflict_set_bm = False;
75 static Bool s_trace_fork_join = False;
76 static Bool s_segment_merging = True;
77 static Bool s_new_segments_since_last_merge;
78 static int s_segment_merge_interval = 10;
81 /* Function definitions. */
83 /** Enables/disables context switch tracing. */
84 void DRD_(thread_trace_context_switches)(const Bool t)
86 tl_assert(t == False || t == True);
87 s_trace_context_switches = t;
90 /** Enables/disables conflict set tracing. */
91 void DRD_(thread_trace_conflict_set)(const Bool t)
93 tl_assert(t == False || t == True);
94 s_trace_conflict_set = t;
97 /** Enables/disables conflict set bitmap tracing. */
98 void DRD_(thread_trace_conflict_set_bm)(const Bool t)
100 tl_assert(t == False || t == True);
101 s_trace_conflict_set_bm = t;
104 /** Report whether fork/join tracing is enabled. */
105 Bool DRD_(thread_get_trace_fork_join)(void)
107 return s_trace_fork_join;
110 /** Enables/disables fork/join tracing. */
111 void DRD_(thread_set_trace_fork_join)(const Bool t)
113 tl_assert(t == False || t == True);
114 s_trace_fork_join = t;
117 /** Enables/disables segment merging. */
118 void DRD_(thread_set_segment_merging)(const Bool m)
120 tl_assert(m == False || m == True);
121 s_segment_merging = m;
124 /** Get the segment merging interval. */
125 int DRD_(thread_get_segment_merge_interval)(void)
127 return s_segment_merge_interval;
130 /** Set the segment merging interval. */
131 void DRD_(thread_set_segment_merge_interval)(const int i)
133 s_segment_merge_interval = i;
137 * Convert Valgrind's ThreadId into a DrdThreadId.
139 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
140 * Valgrind ThreadId does not yet exist.
142 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
146 if (tid == VG_INVALID_THREADID)
147 return DRD_INVALID_THREADID;
149 for (i = 1; i < DRD_N_THREADS; i++)
151 if (DRD_(g_threadinfo)[i].vg_thread_exists == True
152 && DRD_(g_threadinfo)[i].vg_threadid == tid)
158 return DRD_INVALID_THREADID;
161 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
162 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
166 tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
168 for (i = 1; i < DRD_N_THREADS; i++)
170 if (DRD_(g_threadinfo)[i].vg_thread_exists == False
171 && DRD_(g_threadinfo)[i].posix_thread_exists == False
172 && DRD_(g_threadinfo)[i].detached_posix_thread == False)
174 tl_assert(! DRD_(IsValidDrdThreadId)(i));
176 DRD_(g_threadinfo)[i].vg_thread_exists = True;
177 DRD_(g_threadinfo)[i].vg_threadid = tid;
178 DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
179 DRD_(g_threadinfo)[i].stack_min = 0;
180 DRD_(g_threadinfo)[i].stack_min_min = 0;
181 DRD_(g_threadinfo)[i].stack_startup = 0;
182 DRD_(g_threadinfo)[i].stack_max = 0;
183 DRD_(thread_set_name)(i, "");
184 DRD_(g_threadinfo)[i].on_alt_stack = False;
185 DRD_(g_threadinfo)[i].is_recording_loads = True;
186 DRD_(g_threadinfo)[i].is_recording_stores = True;
187 DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
188 DRD_(g_threadinfo)[i].synchr_nesting = 0;
189 tl_assert(DRD_(g_threadinfo)[i].first == 0);
190 tl_assert(DRD_(g_threadinfo)[i].last == 0);
192 tl_assert(DRD_(IsValidDrdThreadId)(i));
199 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
204 return DRD_INVALID_THREADID;
207 /** Convert a POSIX thread ID into a DRD thread ID. */
208 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
212 if (tid != INVALID_POSIX_THREADID)
214 for (i = 1; i < DRD_N_THREADS; i++)
216 if (DRD_(g_threadinfo)[i].posix_thread_exists
217 && DRD_(g_threadinfo)[i].pt_threadid == tid)
223 return DRD_INVALID_THREADID;
226 /** Convert a DRD thread ID into a Valgrind thread ID. */
227 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
229 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
230 && tid != DRD_INVALID_THREADID);
232 return (DRD_(g_threadinfo)[tid].vg_thread_exists
233 ? DRD_(g_threadinfo)[tid].vg_threadid
234 : VG_INVALID_THREADID);
237 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
239 * Sanity check of the doubly linked list of segments referenced by a
241 * @return True if sane, False if not.
243 static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
247 for (p = ti->first; p; p = p->next) {
248 if (p->next && p->next->prev != p)
250 if (p->next == 0 && p != ti->last)
253 for (p = ti->last; p; p = p->prev) {
254 if (p->prev && p->prev->next != p)
256 if (p->prev == 0 && p != ti->first)
264 * Create the first segment for a newly started thread.
266 * This function is called from the handler installed via
267 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
268 * from the context of the creator thread, before the new thread has been
271 * @param[in] creator DRD thread ID of the creator thread.
272 * @param[in] vg_created Valgrind thread ID of the created thread.
274 * @return DRD thread ID of the created thread.
276 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
277 const ThreadId vg_created)
281 tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
282 created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
283 tl_assert(0 <= (int)created && created < DRD_N_THREADS
284 && created != DRD_INVALID_THREADID);
286 tl_assert(DRD_(g_threadinfo)[created].first == 0);
287 tl_assert(DRD_(g_threadinfo)[created].last == 0);
288 /* Create an initial segment for the newly created thread. */
289 thread_append_segment(created, DRD_(sg_new)(creator, created));
295 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
296 * after the thread has been created and before any client instructions are run
297 * on the newly created thread, e.g. from the handler installed via
298 * VG_(track_pre_thread_first_insn)().
300 * @param[in] vg_created Valgrind thread ID of the newly created thread.
302 * @return DRD thread ID for the new thread.
304 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
306 const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
308 tl_assert(0 <= (int)created && created < DRD_N_THREADS
309 && created != DRD_INVALID_THREADID);
311 DRD_(g_threadinfo)[created].stack_max
312 = VG_(thread_get_stack_max)(vg_created);
313 DRD_(g_threadinfo)[created].stack_startup
314 = DRD_(g_threadinfo)[created].stack_max;
315 DRD_(g_threadinfo)[created].stack_min
316 = DRD_(g_threadinfo)[created].stack_max;
317 DRD_(g_threadinfo)[created].stack_min_min
318 = DRD_(g_threadinfo)[created].stack_max;
319 DRD_(g_threadinfo)[created].stack_size
320 = VG_(thread_get_stack_size)(vg_created);
321 tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
327 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
328 * after thread drd_joiner joined thread drd_joinee.
330 void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
332 tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
333 tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
335 DRD_(thread_new_segment)(drd_joiner);
336 DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
337 DRD_(thread_new_segment)(drd_joinee);
339 if (s_trace_fork_join)
341 const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
342 const unsigned msg_size = 256;
345 msg = VG_(malloc)("drd.main.dptj.1", msg_size);
347 VG_(snprintf)(msg, msg_size,
348 "drd_post_thread_join joiner = %d, joinee = %d",
349 drd_joiner, drd_joinee);
354 vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
355 VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
359 VG_(message)(Vg_DebugMsg, "%s\n", msg);
363 if (! DRD_(get_check_stack_accesses)())
365 DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
366 - DRD_(thread_get_stack_size)(drd_joinee),
367 DRD_(thread_get_stack_max)(drd_joinee));
369 DRD_(clientobj_delete_thread)(drd_joinee);
370 DRD_(thread_delete)(drd_joinee, False);
374 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
375 * and accesses this data structure from multiple threads without locking.
376 * Any conflicting accesses in the range stack_startup..stack_max will be
379 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
380 const Addr stack_startup)
382 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
383 && tid != DRD_INVALID_THREADID);
384 tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
385 tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
386 DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
389 /** Return the stack pointer for the specified thread. */
390 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
392 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
393 && tid != DRD_INVALID_THREADID);
394 return DRD_(g_threadinfo)[tid].stack_min;
398 * Return the lowest value that was ever assigned to the stack pointer
399 * for the specified thread.
401 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
403 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
404 && tid != DRD_INVALID_THREADID);
405 return DRD_(g_threadinfo)[tid].stack_min_min;
408 /** Return the top address for the stack of the specified thread. */
409 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
411 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
412 && tid != DRD_INVALID_THREADID);
413 return DRD_(g_threadinfo)[tid].stack_max;
416 /** Return the maximum stack size for the specified thread. */
417 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
419 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
420 && tid != DRD_INVALID_THREADID);
421 return DRD_(g_threadinfo)[tid].stack_size;
424 Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
426 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
427 && tid != DRD_INVALID_THREADID);
428 return DRD_(g_threadinfo)[tid].on_alt_stack;
431 void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
432 const Bool on_alt_stack)
434 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
435 && tid != DRD_INVALID_THREADID);
436 tl_assert(on_alt_stack == !!on_alt_stack);
437 DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
440 Int DRD_(thread_get_threads_on_alt_stack)(void)
444 for (i = 1; i < DRD_N_THREADS; i++)
445 n += DRD_(g_threadinfo)[i].on_alt_stack;
450 * Clean up thread-specific data structures. Call this just after
453 void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
458 tl_assert(DRD_(IsValidDrdThreadId)(tid));
460 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
461 for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
468 DRD_(g_threadinfo)[tid].vg_thread_exists = False;
469 DRD_(g_threadinfo)[tid].posix_thread_exists = False;
471 DRD_(g_threadinfo)[tid].detached_posix_thread = False;
473 tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
474 DRD_(g_threadinfo)[tid].first = 0;
475 DRD_(g_threadinfo)[tid].last = 0;
477 tl_assert(! DRD_(IsValidDrdThreadId)(tid));
481 * Called after a thread performed its last memory access and before
482 * thread_delete() is called. Note: thread_delete() is only called for
483 * joinable threads, not for detached threads.
485 void DRD_(thread_finished)(const DrdThreadId tid)
487 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
488 && tid != DRD_INVALID_THREADID);
490 DRD_(g_threadinfo)[tid].vg_thread_exists = False;
492 if (DRD_(g_threadinfo)[tid].detached_posix_thread)
495 * Once a detached thread has finished, its stack is deallocated and
496 * should no longer be taken into account when computing the conflict set.
498 DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
501 * For a detached thread, calling pthread_exit() invalidates the
502 * POSIX thread ID associated with the detached thread. For joinable
503 * POSIX threads however, the POSIX thread ID remains live after the
504 * pthread_exit() call until pthread_join() is called.
506 DRD_(g_threadinfo)[tid].posix_thread_exists = False;
510 /** Called just after fork() in the child process. */
511 void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
515 for (i = 1; i < DRD_N_THREADS; i++)
519 if (DRD_(IsValidDrdThreadId(i)))
520 DRD_(thread_delete)(i, True);
521 tl_assert(!DRD_(IsValidDrdThreadId(i)));
525 /** Called just before pthread_cancel(). */
526 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
528 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
529 && tid != DRD_INVALID_THREADID);
530 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
532 if (DRD_(thread_get_trace_fork_join)())
533 VG_(message)(Vg_UserMsg, "[%d] drd_thread_pre_cancel %d\n",
534 DRD_(g_drd_running_tid), tid);
538 * Store the POSIX thread ID for the specified thread.
540 * @note This function can be called two times for the same thread -- see also
541 * the comment block preceding the pthread_create() wrapper in
542 * drd_pthread_intercepts.c.
544 void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
546 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
547 && tid != DRD_INVALID_THREADID);
548 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
549 || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
550 tl_assert(ptid != INVALID_POSIX_THREADID);
551 DRD_(g_threadinfo)[tid].posix_thread_exists = True;
552 DRD_(g_threadinfo)[tid].pt_threadid = ptid;
555 /** Returns true for joinable threads and false for detached threads. */
556 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
558 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
559 && tid != DRD_INVALID_THREADID);
560 return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
563 /** Store the thread mode: joinable or detached. */
564 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
566 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
567 && tid != DRD_INVALID_THREADID);
568 tl_assert(!! joinable == joinable);
569 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
571 DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
574 /** Tells DRD that the calling thread is about to enter pthread_create(). */
575 void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
577 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
578 && tid != DRD_INVALID_THREADID);
579 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
580 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
582 DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
585 /** Tells DRD that the calling thread has left pthread_create(). */
586 void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
588 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
589 && tid != DRD_INVALID_THREADID);
590 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
591 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
593 DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
596 /** Obtain the thread number and the user-assigned thread name. */
597 const char* DRD_(thread_get_name)(const DrdThreadId tid)
599 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
600 && tid != DRD_INVALID_THREADID);
602 return DRD_(g_threadinfo)[tid].name;
605 /** Set the name of the specified thread. */
606 void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
608 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
609 && tid != DRD_INVALID_THREADID);
611 if (name == NULL || name[0] == 0)
612 VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
613 sizeof(DRD_(g_threadinfo)[tid].name),
617 VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
618 sizeof(DRD_(g_threadinfo)[tid].name),
621 DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
625 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
628 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
630 tl_assert(vg_tid != VG_INVALID_THREADID);
632 if (vg_tid != s_vg_running_tid)
634 DRD_(thread_set_running_tid)(vg_tid,
635 DRD_(VgThreadIdToDrdThreadId)(vg_tid));
638 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
639 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
643 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
646 void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
647 const DrdThreadId drd_tid)
649 tl_assert(vg_tid != VG_INVALID_THREADID);
650 tl_assert(drd_tid != DRD_INVALID_THREADID);
652 if (vg_tid != s_vg_running_tid)
654 if (s_trace_context_switches
655 && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
657 VG_(message)(Vg_DebugMsg,
658 "Context switch from thread %d to thread %d;"
660 DRD_(g_drd_running_tid), drd_tid,
661 DRD_(sg_get_segments_alive_count)());
663 s_vg_running_tid = vg_tid;
664 DRD_(g_drd_running_tid) = drd_tid;
665 thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
666 s_context_switch_count++;
669 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
670 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
674 * Increase the synchronization nesting counter. Must be called before the
675 * client calls a synchronization function.
677 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
679 tl_assert(DRD_(IsValidDrdThreadId)(tid));
680 return DRD_(g_threadinfo)[tid].synchr_nesting++;
684 * Decrease the synchronization nesting counter. Must be called after the
685 * client left a synchronization function.
687 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
689 tl_assert(DRD_(IsValidDrdThreadId)(tid));
690 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
691 return --DRD_(g_threadinfo)[tid].synchr_nesting;
694 /** Returns the synchronization nesting counter. */
695 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
697 tl_assert(DRD_(IsValidDrdThreadId)(tid));
698 return DRD_(g_threadinfo)[tid].synchr_nesting;
701 /** Append a new segment at the end of the segment list. */
703 void thread_append_segment(const DrdThreadId tid, Segment* const sg)
705 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
706 && tid != DRD_INVALID_THREADID);
708 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
709 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
712 sg->prev = DRD_(g_threadinfo)[tid].last;
714 if (DRD_(g_threadinfo)[tid].last)
715 DRD_(g_threadinfo)[tid].last->next = sg;
716 DRD_(g_threadinfo)[tid].last = sg;
717 if (DRD_(g_threadinfo)[tid].first == 0)
718 DRD_(g_threadinfo)[tid].first = sg;
720 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
721 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
726 * Remove a segment from the segment list of thread threadid, and free the
730 void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
732 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
733 && tid != DRD_INVALID_THREADID);
735 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
736 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
740 sg->prev->next = sg->next;
742 sg->next->prev = sg->prev;
743 if (sg == DRD_(g_threadinfo)[tid].first)
744 DRD_(g_threadinfo)[tid].first = sg->next;
745 if (sg == DRD_(g_threadinfo)[tid].last)
746 DRD_(g_threadinfo)[tid].last = sg->prev;
749 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
750 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
755 * Returns a pointer to the vector clock of the most recent segment associated
758 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
760 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
761 && tid != DRD_INVALID_THREADID);
762 tl_assert(DRD_(g_threadinfo)[tid].last);
763 return &DRD_(g_threadinfo)[tid].last->vc;
767 * Return the latest segment of thread 'tid' and increment its reference count.
769 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
772 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
773 && tid != DRD_INVALID_THREADID);
774 tl_assert(DRD_(g_threadinfo)[tid].last);
777 *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
781 * Compute the minimum of all latest vector clocks of all threads
782 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
784 * @param vc pointer to a vectorclock, holds result upon return.
786 static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
793 for (i = 0; i < DRD_N_THREADS; i++)
795 latest_sg = DRD_(g_threadinfo)[i].last;
799 DRD_(vc_assign)(vc, &latest_sg->vc);
801 DRD_(vc_min)(vc, &latest_sg->vc);
808 * Compute the maximum of all latest vector clocks of all threads.
810 * @param vc pointer to a vectorclock, holds result upon return.
812 static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
819 for (i = 0; i < DRD_N_THREADS; i++)
821 latest_sg = DRD_(g_threadinfo)[i].last;
825 DRD_(vc_assign)(vc, &latest_sg->vc);
827 DRD_(vc_combine)(vc, &latest_sg->vc);
834 * Discard all segments that have a defined order against the latest vector
835 * clock of all threads -- these segments can no longer be involved in a
838 static void thread_discard_ordered_segments(void)
841 VectorClock thread_vc_min;
843 s_discard_ordered_segments_count++;
845 DRD_(vc_init)(&thread_vc_min, 0, 0);
846 DRD_(thread_compute_minimum_vc)(&thread_vc_min);
847 if (DRD_(sg_get_trace)())
849 char *vc_min, *vc_max;
850 VectorClock thread_vc_max;
852 DRD_(vc_init)(&thread_vc_max, 0, 0);
853 DRD_(thread_compute_maximum_vc)(&thread_vc_max);
854 vc_min = DRD_(vc_aprint)(&thread_vc_min);
855 vc_max = DRD_(vc_aprint)(&thread_vc_max);
856 VG_(message)(Vg_DebugMsg,
857 "Discarding ordered segments -- min vc is %s, max vc is %s\n",
861 DRD_(vc_cleanup)(&thread_vc_max);
864 for (i = 0; i < DRD_N_THREADS; i++)
868 for (sg = DRD_(g_threadinfo)[i].first;
869 sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
872 thread_discard_segment(i, sg);
875 DRD_(vc_cleanup)(&thread_vc_min);
879 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
880 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
881 * all segments in the set CS are ordered consistently against both sg1 and
882 * sg2. The set CS is defined as the set of segments that can immediately
883 * precede future segments via inter-thread synchronization operations. In
884 * DRD the set CS consists of the latest segment of each thread combined with
885 * all segments for which the reference count is strictly greater than one.
886 * The code below is an optimized version of the following:
888 * for (i = 0; i < DRD_N_THREADS; i++)
892 * for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
894 * if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
896 * if ( DRD_(vc_lte)(&sg1->vc, &sg->vc)
897 * != DRD_(vc_lte)(&sg2->vc, &sg->vc)
898 * || DRD_(vc_lte)(&sg->vc, &sg1->vc)
899 * != DRD_(vc_lte)(&sg->vc, &sg2->vc))
907 static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
913 tl_assert(sg1->next);
914 tl_assert(sg2->next);
915 tl_assert(sg1->next == sg2);
916 tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
918 for (i = 0; i < DRD_N_THREADS; i++)
922 for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
924 if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
926 if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
928 if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
932 for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
934 if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
936 if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
938 if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
947 * Merge all segments that may be merged without triggering false positives
948 * or discarding real data races. For the theoretical background of segment
949 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
950 * and Koen De Bosschere. Bounding the number of segment histories during
951 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
952 * pp 1221-1238, September 2002. This paper contains a proof that merging
953 * consecutive segments for which the property equiv(s1,s2) holds can be
954 * merged without reducing the accuracy of datarace detection. Furthermore
955 * it is also proven that the total number of all segments will never grow
956 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
957 * every time a new segment is created. The property equiv(s1, s2) is defined
958 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
959 * clocks of segments s and s1 are ordered in the same way as those of segments
960 * s and s2. The set CS is defined as the set of existing segments s that have
961 * the potential to conflict with not yet created segments, either because the
962 * segment s is the latest segment of a thread or because it can become the
963 * immediate predecessor of a new segment due to a synchronization operation.
965 static void thread_merge_segments(void)
969 s_new_segments_since_last_merge = 0;
971 for (i = 0; i < DRD_N_THREADS; i++)
975 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
976 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
979 for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
981 if (DRD_(sg_get_refcnt)(sg) == 1
983 && DRD_(sg_get_refcnt)(sg->next) == 1
985 && thread_consistent_segment_ordering(i, sg, sg->next))
987 /* Merge sg and sg->next into sg. */
988 DRD_(sg_merge)(sg, sg->next);
989 thread_discard_segment(i, sg->next);
993 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
994 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
1000 * Create a new segment for the specified thread, and discard any segments
1001 * that cannot cause races anymore.
1003 void DRD_(thread_new_segment)(const DrdThreadId tid)
1008 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1009 && tid != DRD_INVALID_THREADID);
1010 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1012 last_sg = DRD_(g_threadinfo)[tid].last;
1013 new_sg = DRD_(sg_new)(tid, tid);
1014 thread_append_segment(tid, new_sg);
1015 if (tid == DRD_(g_drd_running_tid) && last_sg)
1017 DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
1018 s_update_conflict_set_new_sg_count++;
1021 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1023 if (s_segment_merging
1024 && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1026 thread_discard_ordered_segments();
1027 thread_merge_segments();
1031 /** Call this function after thread 'joiner' joined thread 'joinee'. */
1032 void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
1034 tl_assert(joiner != joinee);
1035 tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
1036 && joiner != DRD_INVALID_THREADID);
1037 tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
1038 && joinee != DRD_INVALID_THREADID);
1039 tl_assert(DRD_(g_threadinfo)[joiner].last);
1040 tl_assert(DRD_(g_threadinfo)[joinee].last);
1042 if (DRD_(sg_get_trace)())
1045 str1 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
1046 str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joinee].last->vc);
1047 VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
1052 if (joiner == DRD_(g_drd_running_tid))
1056 DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[joiner].last->vc);
1057 DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
1058 &DRD_(g_threadinfo)[joinee].last->vc);
1059 DRD_(thread_update_conflict_set)(joiner, &old_vc);
1060 s_update_conflict_set_join_count++;
1061 DRD_(vc_cleanup)(&old_vc);
1065 DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
1066 &DRD_(g_threadinfo)[joinee].last->vc);
1069 thread_discard_ordered_segments();
1071 if (DRD_(sg_get_trace)())
1074 str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
1075 VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
1081 * Update the vector clock of the last segment of thread tid with the
1082 * the vector clock of segment sg.
1084 static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
1086 const VectorClock* const vc = &sg->vc;
1088 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1089 && tid != DRD_INVALID_THREADID);
1090 tl_assert(DRD_(g_threadinfo)[tid].last);
1098 DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
1099 DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
1100 if (DRD_(sg_get_trace)())
1103 str1 = DRD_(vc_aprint)(&old_vc);
1104 str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
1105 VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
1110 thread_discard_ordered_segments();
1112 DRD_(thread_update_conflict_set)(tid, &old_vc);
1113 s_update_conflict_set_sync_count++;
1115 DRD_(vc_cleanup)(&old_vc);
1119 tl_assert(DRD_(vc_lte)(vc, &DRD_(g_threadinfo)[tid].last->vc));
1124 * Create a new segment for thread tid and update the vector clock of the last
1125 * segment of this thread with the the vector clock of segment sg. Call this
1126 * function after thread tid had to wait because of thread synchronization
1127 * until the memory accesses in the segment sg finished.
1129 void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
1131 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1132 && tid != DRD_INVALID_THREADID);
1133 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1136 thread_append_segment(tid, DRD_(sg_new)(tid, tid));
1138 thread_combine_vc_sync(tid, sg);
1140 if (s_segment_merging
1141 && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1143 thread_discard_ordered_segments();
1144 thread_merge_segments();
1149 * Call this function whenever a thread is no longer using the memory
1150 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1153 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
   /* Id of a thread other than the running one that accessed [a1,a2[,
    * or DRD_INVALID_THREADID if none was found. */
1155 DrdThreadId other_user;
1158 /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
1159 other_user = DRD_INVALID_THREADID;
1160 for (i = 0; i < DRD_N_THREADS; i++)
1163 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next) {
   /* Only probe for other users until one has been found, and never
    * count the running thread itself. */
1164 if (other_user == DRD_INVALID_THREADID
1165 && i != DRD_(g_drd_running_tid)) {
   /* bm_test_and_clear both tests for accesses and clears the range;
    * NOTE(review): the branch recording i in other_user is elided here. */
1166 if (UNLIKELY(DRD_(bm_test_and_clear)(DRD_(sg_bm)(p), a1, a2)))
1169 DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1174 * If any other thread had accessed memory in [ a1, a2 [, update the
   /* Full recomputation of the conflict set is only necessary when the
    * range actually appears in the current conflict set. */
1177 if (other_user != DRD_INVALID_THREADID
1178 && DRD_(bm_has_any_access)(DRD_(g_conflict_set), a1, a2))
1180 thread_compute_conflict_set(&DRD_(g_conflict_set),
1181 DRD_(thread_get_running_tid)());
1185 /** Specify whether memory loads should be recorded. */
1186 void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
1188 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1189 && tid != DRD_INVALID_THREADID);
1190 tl_assert(enabled == !! enabled);
1192 DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
1195 /** Specify whether memory stores should be recorded. */
1196 void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
1198 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1199 && tid != DRD_INVALID_THREADID);
1200 tl_assert(enabled == !! enabled);
1202 DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
1206 * Print the segment information for all threads.
1208 * This function is only used for debugging purposes.
1210 void DRD_(thread_print_all)(void)
1215 for (i = 0; i < DRD_N_THREADS; i++)
   /* Only print threads that have at least one segment. */
1217 if (DRD_(g_threadinfo)[i].first)
   /* Header: Valgrind/POSIX thread identity and detach state. */
1219 VG_(printf)("**************\n"
1220 "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
1223 DRD_(g_threadinfo)[i].vg_thread_exists,
1224 DRD_(g_threadinfo)[i].vg_threadid,
1225 DRD_(g_threadinfo)[i].posix_thread_exists,
1226 DRD_(g_threadinfo)[i].pt_threadid,
1227 DRD_(g_threadinfo)[i].detached_posix_thread,
   /* Walk the thread's segment list from oldest to newest. */
1228 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1236 /** Show a call stack involved in a data race. */
/*
 * Prints msg tagged with the DRD thread id, followed by either the stored
 * context, the thread's current stack trace, or a note that the thread has
 * already finished.
 */
1237 static void show_call_stack(const DrdThreadId tid,
1238 const Char* const msg,
1239 ExeContext* const callstack)
1241 const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
1243 VG_(message)(Vg_UserMsg, "%s (thread %d)\n", msg, tid);
   /* Thread still exists inside Valgrind? */
1245 if (vg_tid != VG_INVALID_THREADID)
   /* Prefer the stored context; fall back to the live stack trace. */
1249 VG_(pp_ExeContext)(callstack);
1253 VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   /* The thread has terminated, so no stack can be produced any more. */
1258 VG_(message)(Vg_UserMsg,
1259 " (thread finished, call stack no longer available)\n");
1263 /** Print information about the segments involved in a data race. */
/*
 * For one segment p of thread tid that accesses [addr, addr+size[, report
 * every segment of every other thread that is unordered to p and holds a
 * conflicting access to the same range.
 */
1265 thread_report_conflicting_segments_segment(const DrdThreadId tid,
1268 const BmAccessTypeT access_type,
1269 const Segment* const p)
1273 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1274 && tid != DRD_INVALID_THREADID);
1277 for (i = 0; i < DRD_N_THREADS; i++)
   /* Iterate from the newest segment backwards. */
1282 for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
1285 * Since q iterates over the segments of thread i in order of
1286 * decreasing vector clocks, if q->vc <= p->vc, then
1287 * q->next->vc <= p->vc will also hold. Hence, break out of the
1288 * loop once this condition is met.
1290 if (DRD_(vc_lte)(&q->vc, &p->vc))
   /* q and p are unordered (concurrent) iff neither vc dominates. */
1292 if (! DRD_(vc_lte)(&p->vc, &q->vc))
1294 if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
1297 tl_assert(q->stacktrace);
   /* The end of segment q is where its successor starts (0 if none). */
1298 show_call_stack(i, "Other segment start",
1300 show_call_stack(i, "Other segment end",
1301 q->next ? q->next->stacktrace : 0);
1309 /** Print information about all segments involved in a data race. */
/*
 * Scans all segments of thread tid for accesses to [addr, addr+size[ and
 * delegates per-segment reporting to
 * thread_report_conflicting_segments_segment().
 */
1310 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1313 const BmAccessTypeT access_type)
1317 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1318 && tid != DRD_INVALID_THREADID);
1320 for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
   /* Only segments that themselves touched the range are relevant. */
1322 if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1324 thread_report_conflicting_segments_segment(tid, addr, size,
1331 * Verify whether the conflict set for thread tid is up to date. Only perform
1332 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
1334 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
   /* -1 means "not yet determined"; the getenv() result is cached in this
    * function-static so the environment is only queried once. */
1336 static int do_verify_conflict_set = -1;
1338 struct bitmap* computed_conflict_set = 0;
1340 if (do_verify_conflict_set < 0)
1341 do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
   /* Verification disabled: report "up to date" without checking. */
1343 if (do_verify_conflict_set == 0)
   /* Recompute the conflict set from scratch and compare it against the
    * incrementally maintained one. */
1346 thread_compute_conflict_set(&computed_conflict_set, tid);
1347 result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   /* On mismatch, dump both bitmaps to ease debugging. */
1350 VG_(printf)("actual conflict set:\n");
1351 DRD_(bm_print)(DRD_(g_conflict_set));
1353 VG_(printf)("computed conflict set:\n");
1354 DRD_(bm_print)(computed_conflict_set);
1357 DRD_(bm_delete)(computed_conflict_set);
1362 * Compute the conflict set: a bitmap that represents the union of all memory
1363 * accesses of all segments that are unordered to the current segment of the
1366 static void thread_compute_conflict_set(struct bitmap** conflict_set,
1367 const DrdThreadId tid)
1371 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1372 && tid != DRD_INVALID_THREADID);
   /* Only the running thread's conflict set is ever computed. */
1373 tl_assert(tid == DRD_(g_drd_running_tid));
1375 s_compute_conflict_set_count++;
   /* Subtract the current bitmap creation counters here and add them back
    * at the end, so the statistics capture only the bitmaps created while
    * computing this conflict set. */
1376 s_conflict_set_bitmap_creation_count
1377 -= DRD_(bm_get_bitmap_creation_count)();
1378 s_conflict_set_bitmap2_creation_count
1379 -= DRD_(bm_get_bitmap2_creation_count)();
   /* Reuse an existing bitmap when possible; otherwise allocate one. */
1383 DRD_(bm_cleanup)(*conflict_set);
1384 DRD_(bm_init)(*conflict_set);
1388 *conflict_set = DRD_(bm_new)();
1391 if (s_trace_conflict_set)
1395 str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
1396 VG_(message)(Vg_DebugMsg,
1397 "computing conflict set for thread %d with vc %s\n",
   /* p is tid's current (most recent) segment. */
1402 p = DRD_(g_threadinfo)[tid].last;
1406 if (s_trace_conflict_set)
1410 vc = DRD_(vc_aprint)(&p->vc);
1411 VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
   /* Merge the access bitmap of every segment of every other valid thread
    * that is unordered to p. */
1416 for (j = 0; j < DRD_N_THREADS; j++)
1418 if (j != tid && DRD_(IsValidDrdThreadId)(j))
1421 for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
   /* Unordered iff neither vector clock dominates the other. */
1423 if (! DRD_(vc_lte)(&q->vc, &p->vc)
1424 && ! DRD_(vc_lte)(&p->vc, &q->vc))
1426 if (s_trace_conflict_set)
1430 str = DRD_(vc_aprint)(&q->vc);
1431 VG_(message)(Vg_DebugMsg,
1432 "conflict set: [%d] merging segment %s\n",
1436 DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
1440 if (s_trace_conflict_set)
1444 str = DRD_(vc_aprint)(&q->vc);
1445 VG_(message)(Vg_DebugMsg,
1446 "conflict set: [%d] ignoring segment %s\n",
   /* Complete the bitmap-creation accounting started above. */
1456 s_conflict_set_bitmap_creation_count
1457 += DRD_(bm_get_bitmap_creation_count)();
1458 s_conflict_set_bitmap2_creation_count
1459 += DRD_(bm_get_bitmap2_creation_count)();
1461 if (s_trace_conflict_set_bm)
1463 VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
1464 DRD_(bm_print)(*conflict_set);
1465 VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
1470 * Update the conflict set after the vector clock of thread tid has been
1471 * updated from old_vc to its current value, either because a new segment has
1472 * been created or because of a synchronization operation.
1474 void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
1475 const VectorClock* const old_vc)
1477 const VectorClock* new_vc;
1481 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1482 && tid != DRD_INVALID_THREADID);
1484 tl_assert(tid == DRD_(g_drd_running_tid));
1485 tl_assert(DRD_(g_conflict_set));
1487 if (s_trace_conflict_set)
1491 str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
1492 VG_(message)(Vg_DebugMsg,
1493 "updating conflict set for thread %d with vc %s\n",
1498 new_vc = &DRD_(g_threadinfo)[tid].last->vc;
   /* Phase 1: clear all marks, then mark the regions of every segment
    * whose membership in the conflict set changes between old_vc and
    * new_vc. Only those regions need rebuilding. */
1500 DRD_(bm_unmark)(DRD_(g_conflict_set));
1502 for (j = 0; j < DRD_N_THREADS; j++)
1506 if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
1509 for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
   /* A segment belongs to a conflict set iff it is unordered to the
    * reference clock (neither vc dominates). */
1511 const int included_in_old_conflict_set
1512 = ! DRD_(vc_lte)(&q->vc, old_vc)
1513 && ! DRD_(vc_lte)(old_vc, &q->vc);
1514 const int included_in_new_conflict_set
1515 = ! DRD_(vc_lte)(&q->vc, new_vc)
1516 && ! DRD_(vc_lte)(new_vc, &q->vc);
   /* Membership changed: mark this segment's regions for rebuild. */
1517 if (included_in_old_conflict_set != included_in_new_conflict_set)
1519 if (s_trace_conflict_set)
1523 str = DRD_(vc_aprint)(&q->vc);
1524 VG_(message)(Vg_DebugMsg,
1525 "conflict set: [%d] merging segment %s\n", j, str);
1528 DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
1532 if (s_trace_conflict_set)
1536 str = DRD_(vc_aprint)(&q->vc);
1537 VG_(message)(Vg_DebugMsg,
1538 "conflict set: [%d] ignoring segment %s\n", j, str);
   /* Phase 2: wipe the marked regions, then re-merge into them the access
    * bitmaps of all segments that are unordered to tid's last segment. */
1545 DRD_(bm_clear_marked)(DRD_(g_conflict_set));
1547 p = DRD_(g_threadinfo)[tid].last;
1549 for (j = 0; j < DRD_N_THREADS; j++)
1551 if (j != tid && DRD_(IsValidDrdThreadId)(j))
1554 for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
1556 if (! DRD_(vc_lte)(&q->vc, &p->vc)
1557 && ! DRD_(vc_lte)(&p->vc, &q->vc))
1559 DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
   /* Phase 3: drop marked regions that ended up with no accesses. */
1566 DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));
1568 s_update_conflict_set_count++;
1570 if (s_trace_conflict_set_bm)
1572 VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
1573 DRD_(bm_print)(DRD_(g_conflict_set));
1574 VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   /* Cross-check the incremental update against a full recomputation when
    * DRD_VERIFY_CONFLICT_SET is set. */
1577 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1580 /** Report the number of context switches performed. */
1581 ULong DRD_(thread_get_context_switch_count)(void)
1583 return s_context_switch_count;
1586 /** Report the number of ordered segments that have been discarded. */
1587 ULong DRD_(thread_get_discard_ordered_segments_count)(void)
1589 return s_discard_ordered_segments_count;
1592 /** Return how many times the conflict set has been updated entirely. */
1593 ULong DRD_(thread_get_compute_conflict_set_count)()
1595 return s_compute_conflict_set_count;
1598 /** Return how many times the conflict set has been updated partially. */
1599 ULong DRD_(thread_get_update_conflict_set_count)(void)
1601 return s_update_conflict_set_count;
1605 * Return how many times the conflict set has been updated partially
1606 * because a new segment has been created.
1608 ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
1610 return s_update_conflict_set_new_sg_count;
1614 * Return how many times the conflict set has been updated partially
1615 * because of combining vector clocks due to synchronization operations
1616 * other than reader/writer lock or barrier operations.
1618 ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
1620 return s_update_conflict_set_sync_count;
1624 * Return how many times the conflict set has been updated partially
1625 * because of thread joins.
1627 ULong DRD_(thread_get_update_conflict_set_join_count)(void)
1629 return s_update_conflict_set_join_count;
1633 * Return the number of first-level bitmaps that have been created during
1634 * conflict set updates.
1636 ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
1638 return s_conflict_set_bitmap_creation_count;
1642 * Return the number of second-level bitmaps that have been created during
1643 * conflict set updates.
1645 ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
1647 return s_conflict_set_bitmap2_creation_count;