2 /*--------------------------------------------------------------------*/
3 /*--- Thread scheduling. scheduler.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2000-2010 Julian Seward
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
28 The GNU General Public License is contained in the file COPYING.
34 Valgrind tries to emulate the kernel's threading as closely as
35 possible. The client does all threading via the normal syscalls
36 (on Linux: clone, etc). Valgrind emulates this by creating exactly
37 the same process structure as would be created without Valgrind.
38 There are no extra threads.
40 The main difference is that Valgrind only allows one client thread
41 to run at once. This is controlled with the CPU Big Lock,
42 "the_BigLock". Any time a thread wants to run client code or
43 manipulate any shared state (which is anything other than its own
44 ThreadState entry), it must hold the_BigLock.
46 When a thread is about to block in a blocking syscall, it releases
47 the_BigLock, and re-takes it when it becomes runnable again (either
48 because the syscall finished, or we took a signal).
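Illustrative sketch only (not part of the original file): the lock
discipline around a blocking syscall, using the real
VG_(release_BigLock)/VG_(acquire_BigLock) entry points defined
below. do_blocking_syscall() is a hypothetical placeholder.

#if 0
static void example_blocking_syscall ( ThreadId tid )
{
   /* We hold the_BigLock and are VgTs_Runnable on entry. */
   VG_(release_BigLock)(tid, VgTs_WaitSys, "example");
   /* ---- lock not held; other threads may run ---- */
   do_blocking_syscall();   /* hypothetical; blocks in the kernel */
   /* ---- syscall finished (or a signal interrupted it) ---- */
   VG_(acquire_BigLock)(tid, "example");
   /* We hold the_BigLock again and are VgTs_Runnable. */
}
#endif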
50 VG_(scheduler) therefore runs in each thread. It returns only when
51 the thread is exiting, either because it exited itself, or it was
52 told to exit by another thread.
54 This file is almost entirely OS-independent. The details of how
55 the OS handles threading and signalling are abstracted away and
56 implemented elsewhere. [Some of the functions have worked their
57 way back for the moment, until we do an OS port in earnest...]
60 #include "pub_core_basics.h"
61 #include "pub_core_debuglog.h"
62 #include "pub_core_vki.h"
63 #include "pub_core_vkiscnums.h" // __NR_sched_yield
64 #include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
65 #include "pub_core_threadstate.h"
66 #include "pub_core_aspacemgr.h"
67 #include "pub_core_clreq.h" // for VG_USERREQ__*
68 #include "pub_core_dispatch.h"
69 #include "pub_core_errormgr.h" // For VG_(get_n_errs_found)()
70 #include "pub_core_gdbserver.h" // for VG_(gdbserver) and VG_(gdbserver_activity)
71 #include "pub_core_libcbase.h"
72 #include "pub_core_libcassert.h"
73 #include "pub_core_libcprint.h"
74 #include "pub_core_libcproc.h"
75 #include "pub_core_libcsignal.h"
76 #if defined(VGO_darwin)
77 #include "pub_core_mach.h"
78 #endif
79 #include "pub_core_machine.h"
80 #include "pub_core_mallocfree.h"
81 #include "pub_core_options.h"
82 #include "pub_core_replacemalloc.h"
83 #include "pub_core_signals.h"
84 #include "pub_core_stacks.h"
85 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
86 #include "pub_core_syscall.h"
87 #include "pub_core_syswrap.h"
88 #include "pub_core_tooliface.h"
89 #include "pub_core_translate.h" // For VG_(translate)()
90 #include "pub_core_transtab.h"
91 #include "pub_core_debuginfo.h" // VG_(di_notify_pdb_debuginfo)
92 #include "priv_sema.h"
93 #include "pub_core_scheduler.h" // self
94 #include "pub_core_redir.h"
96 /* ---------------------------------------------------------------------
97 Types and globals for the scheduler.
98 ------------------------------------------------------------------ */
100 /* ThreadId and ThreadState are defined elsewhere */
102 /* Defines the thread-scheduling timeslice, in terms of the number of
103 basic blocks we attempt to run each thread for. Smaller values
104 give finer interleaving but much increased scheduling overheads. */
105 #define SCHEDULING_QUANTUM 100000
107 /* If False, a fault is Valgrind-internal (ie, a bug) */
108 Bool VG_(in_generated_code) = False;
110 /* Counts downwards in VG_(run_innerloop). */
111 UInt VG_(dispatch_ctr);
113 /* 64-bit counter for the number of basic blocks done. */
114 static ULong bbs_done = 0;
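/* Worked example (not in the original): the per-slice block count is
   recovered from the downward counter, as run_thread_for_a_while()
   does below:

      saved     = VG_(dispatch_ctr);        // before dispatch
      ... run translations; one decrement per block entered ...
      this_time = saved - VG_(dispatch_ctr);
      bbs_done += (ULong)this_time;
*/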
116 /* Counter to see if vgdb activity is to be verified.
117 When the number of bbs done reaches vgdb_next_poll, the scheduler
118 will poll for gdbserver activity. VG_(force_vgdb_poll) and
119 VG_(disable_vgdb_poll) allow the valgrind core (e.g. m_gdbserver)
120 to control when the next poll will be done. */
121 static ULong vgdb_next_poll;
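/* Sketch of the polling decision taken at the end of each timeslice
   (it duplicates, purely for exposition, the real logic found in
   run_thread_for_a_while() below). */
#if 0
if (bbs_done >= vgdb_next_poll) {
   if (VG_(clo_vgdb_poll))
      vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
   else
      vgdb_next_poll = NO_VGDB_POLL;  /* poll was forced via ptrace */
   if (VG_(gdbserver_activity)(tid))
      VG_(gdbserver)(tid);
}
#endif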
124 static void do_client_request ( ThreadId tid );
125 static void scheduler_sanity ( ThreadId tid );
126 static void mostly_clear_thread_record ( ThreadId tid );
129 static ULong n_scheduling_events_MINOR = 0;
130 static ULong n_scheduling_events_MAJOR = 0;
132 /* Sanity checking counts. */
133 static UInt sanity_fast_count = 0;
134 static UInt sanity_slow_count = 0;
136 void VG_(print_scheduler_stats)(void)
138 VG_(message)(Vg_DebugMsg,
139 "scheduler: %'llu jumps (bb entries).\n", bbs_done );
140 VG_(message)(Vg_DebugMsg,
141 "scheduler: %'llu/%'llu major/minor sched events.\n",
142 n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
143 VG_(message)(Vg_DebugMsg,
144 " sanity: %d cheap, %d expensive checks.\n",
145 sanity_fast_count, sanity_slow_count );
148 /* CPU semaphore, so that threads can run exclusively */
149 #if !defined(VGO_l4re) // L4Re uses it outside as well
150 static
151 #endif
152 vg_sema_t the_BigLock;
155 /* ---------------------------------------------------------------------
156 Helper functions for the scheduler.
157 ------------------------------------------------------------------ */
160 void print_sched_event ( ThreadId tid, Char* what )
162 VG_(message)(Vg_DebugMsg, " SCHED[%d]: %s\n", tid, what );
165 static
166 HChar* name_of_sched_event ( UInt event )
167 {
168 switch (event) {
169 case VEX_TRC_JMP_SYS_SYSCALL: return "SYSCALL";
170 case VEX_TRC_JMP_SYS_INT32: return "INT32";
171 case VEX_TRC_JMP_SYS_INT128: return "INT128";
172 case VEX_TRC_JMP_SYS_INT129: return "INT129";
173 case VEX_TRC_JMP_SYS_INT130: return "INT130";
174 case VEX_TRC_JMP_SYS_SYSENTER: return "SYSENTER";
175 case VEX_TRC_JMP_CLIENTREQ: return "CLIENTREQ";
176 case VEX_TRC_JMP_YIELD: return "YIELD";
177 case VEX_TRC_JMP_NODECODE: return "NODECODE";
178 case VEX_TRC_JMP_MAPFAIL: return "MAPFAIL";
179 case VEX_TRC_JMP_NOREDIR: return "NOREDIR";
180 case VEX_TRC_JMP_EMWARN: return "EMWARN";
181 case VEX_TRC_JMP_TINVAL: return "TINVAL";
182 case VG_TRC_INVARIANT_FAILED: return "INVFAILED";
183 case VG_TRC_INNER_COUNTERZERO: return "COUNTERZERO";
184 case VG_TRC_INNER_FASTMISS: return "FASTMISS";
185 case VG_TRC_FAULT_SIGNAL: return "FAULTSIGNAL";
187 case VEX_TRC_JMP_L4_UTCB_EAX: return "L4UTCB EAX";
188 case VEX_TRC_JMP_L4_UTCB_EBX: return "L4UTCB EBX";
189 case VEX_TRC_JMP_L4_UTCB_ECX: return "L4UTCB ECX";
190 case VEX_TRC_JMP_L4_UTCB_EDX: return "L4UTCB EDX";
191 case VEX_TRC_JMP_SYS_INT48: return "INT 0x30";
192 case VEX_TRC_JMP_SYS_INT50: return "INT 0x32";
193 case VEX_TRC_JMP_SIGTRAP: return "SIGTRAP (INT3)";
194 case VEX_TRC_JMP_L4_UD2: return "L4: UD2";
195 case VEX_TRC_JMP_L4_ARTIFICIAL: return "L4: Artificial";
197 default: return "??UNKNOWN??";
198 }
199 }
201 /* Allocate a completely empty ThreadState record. */
202 ThreadId VG_(alloc_ThreadState) ( void )
203 {
204 Int i;
205 for (i = 1; i < VG_N_THREADS; i++) {
206 if (VG_(threads)[i].status == VgTs_Empty) {
207 VG_(threads)[i].status = VgTs_Init;
208 VG_(threads)[i].exitreason = VgSrc_None;
209 #if defined(VGO_l4re)
210 VG_(debugLog)(1, "sched", "Initializing ThreadState %d\n", i);
212 VG_(memset)(ts_utcb_copy(&VG_(threads)[i]), 0, L4RE_UTCB_SIZE);
214 VG_(threads)[i].os_state.utcb = (l4_utcb_t *)ts_utcb_copy(&VG_(threads)[i]);
217 // copy current utcb as initial utcb into thread state
218 l4_utcb_t *utcb = l4_utcb_wrap();
219 VG_(memcpy)(ts_utcb(&VG_(threads)[i]), utcb, L4RE_UTCB_SIZE);
220 #endif
221 return i;
222 }
223 }
225 VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
226 VG_(printf)("Increase VG_N_THREADS, rebuild and try again.\n");
227 VG_(core_panic)("VG_N_THREADS is too low");
228 }
232 Mark a thread as Runnable. This will block until the_BigLock is
233 available, so that we get exclusive access to all the shared
234 structures and the CPU. Up until we get the_BigLock, we must not
235 touch any shared state.
237 When this returns, we'll actually be running.
239 void VG_(acquire_BigLock)(ThreadId tid, HChar* who)
240 {
241 ThreadState *tst;
244 if (VG_(clo_trace_sched)) {
245 HChar buf[100];
246 vg_assert(VG_(strlen)(who) <= 100-50);
247 VG_(sprintf)(buf, "waiting for lock (%s)", who);
248 print_sched_event(tid, buf);
252 /* First, acquire the_BigLock. We can't do anything else safely
253 prior to this point. Even doing debug printing prior to this
254 point is, technically, wrong. */
255 ML_(sema_down)(&the_BigLock, False/*not LL*/);
257 tst = VG_(get_ThreadState)(tid);
259 vg_assert(tst->status != VgTs_Runnable);
261 tst->status = VgTs_Runnable;
263 if (VG_(running_tid) != VG_INVALID_THREADID)
264 VG_(printf)("tid %d found %d running\n", tid, VG_(running_tid));
265 vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
266 VG_(running_tid) = tid;
268 { Addr gsp = VG_(get_SP)(tid);
269 VG_(unknown_SP_update)(gsp, gsp, 0/*unknown origin*/);
272 if (VG_(clo_trace_sched)) {
273 HChar buf[150];
274 vg_assert(VG_(strlen)(who) <= 150-50);
275 VG_(sprintf)(buf, " acquired lock (%s)", who);
276 print_sched_event(tid, buf);
281 Set a thread into a sleeping state, and give up exclusive access to
282 the CPU. On return, the thread must be prepared to block until it
283 is ready to run again (generally this means blocking in a syscall,
284 but it may mean that we remain in a Runnable state and we're just
285 yielding the CPU to another thread).
287 void VG_(release_BigLock)(ThreadId tid, ThreadStatus sleepstate, HChar* who)
288 {
289 ThreadState *tst = VG_(get_ThreadState)(tid);
291 vg_assert(tst->status == VgTs_Runnable);
293 vg_assert(sleepstate == VgTs_WaitSys ||
294 sleepstate == VgTs_Yielding);
296 tst->status = sleepstate;
298 vg_assert(VG_(running_tid) == tid);
299 VG_(running_tid) = VG_INVALID_THREADID;
301 if (VG_(clo_trace_sched)) {
302 HChar buf[200];
303 vg_assert(VG_(strlen)(who) <= 200-100);
304 VG_(sprintf)(buf, "releasing lock (%s) -> %s",
305 who, VG_(name_of_ThreadStatus)(sleepstate));
306 print_sched_event(tid, buf);
309 /* Release the_BigLock; this will reschedule any runnable
310 thread. */
311 ML_(sema_up)(&the_BigLock, False/*not LL*/);
312 }
314 /* See pub_core_scheduler.h for description */
315 void VG_(acquire_BigLock_LL) ( HChar* who )
316 {
317 ML_(sema_down)(&the_BigLock, True/*LL*/);
318 }
320 /* See pub_core_scheduler.h for description */
321 void VG_(release_BigLock_LL) ( HChar* who )
322 {
323 ML_(sema_up)(&the_BigLock, True/*LL*/);
324 }
327 /* Clear out the ThreadState and release the semaphore. Leaves the
328 ThreadState in VgTs_Zombie state, so that it doesn't get
329 reallocated until the caller is really ready. */
330 void VG_(exit_thread)(ThreadId tid)
331 {
332 vg_assert(VG_(is_valid_tid)(tid));
333 vg_assert(VG_(is_running_thread)(tid));
334 vg_assert(VG_(is_exiting)(tid));
336 mostly_clear_thread_record(tid);
337 VG_(running_tid) = VG_INVALID_THREADID;
339 /* There should still be a valid exitreason for this thread */
340 vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);
342 if (VG_(clo_trace_sched))
343 print_sched_event(tid, "release lock in VG_(exit_thread)");
345 ML_(sema_up)(&the_BigLock, False/*not LL*/);
346 }
348 /* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
349 out of the syscall and onto doing the next thing, whatever that is.
350 If it isn't blocked in a syscall, this has no effect on the thread. */
351 void VG_(get_thread_out_of_syscall)(ThreadId tid)
352 {
353 vg_assert(VG_(is_valid_tid)(tid));
354 vg_assert(!VG_(is_running_thread)(tid));
356 if (VG_(threads)[tid].status == VgTs_WaitSys) {
357 if (VG_(clo_trace_signals)) {
358 VG_(message)(Vg_DebugMsg,
359 "get_thread_out_of_syscall zaps tid %d lwp %d\n",
360 tid, VG_(threads)[tid].os_state.lwpid);
361 }
362 # if defined(VGO_darwin)
364 // GrP fixme use mach primitives on darwin?
365 // GrP fixme thread_abort_safely?
366 // GrP fixme race for thread with WaitSys set but not in syscall yet?
367 extern kern_return_t thread_abort(mach_port_t);
368 thread_abort(VG_(threads)[tid].os_state.lwpid);
369 # else
372 __attribute__((unused))
373 Int r = VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
374 /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
375 I'm really not sure. Here's a race scenario which argues
376 that we shouldn't; but equally I'm not sure the scenario is
377 even possible, because of constraints caused by the question
378 of who holds the BigLock when.
380 Target thread tid does sys_read on a socket and blocks. This
381 function gets called, and we observe correctly that tid's
382 status is WaitSys but then for whatever reason this function
383 goes very slowly for a while. Then data arrives from
384 wherever, tid's sys_read returns, tid exits. Then we do
385 tkill on tid, but tid no longer exists; tkill returns an
386 error code and the assert fails. */
387 /* vg_assert(r == 0); */
388 # endif
389 }
390 }
394 Yield the CPU for a short time to let some other thread run.
396 void VG_(vg_yield)(void)
397 {
398 ThreadId tid = VG_(running_tid);
400 vg_assert(tid != VG_INVALID_THREADID);
401 vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());
403 VG_(release_BigLock)(tid, VgTs_Yielding, "VG_(vg_yield)");
406 Tell the kernel we're yielding.
408 #if defined(VGO_l4re)
409 // l4_thread_yield();
410 l4_thread_switch(L4_INVALID_CAP);
412 VG_(do_syscall0)(__NR_sched_yield);
413 #endif
415 VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
416 }
419 /* Set the standard set of blocked signals, used whenever we're not
420 running a client syscall. */
421 static void block_signals(void)
422 {
423 #if defined(VGO_l4re)
424 // VG_(unimplemented)("unimplemented function block_signals()");
425 #else
426 vki_sigset_t mask;
429 VG_(sigfillset)(&mask);
431 /* Don't block these because they're synchronous */
432 VG_(sigdelset)(&mask, VKI_SIGSEGV);
433 VG_(sigdelset)(&mask, VKI_SIGBUS);
434 VG_(sigdelset)(&mask, VKI_SIGFPE);
435 VG_(sigdelset)(&mask, VKI_SIGILL);
436 VG_(sigdelset)(&mask, VKI_SIGTRAP);
438 /* Can't block these anyway */
439 VG_(sigdelset)(&mask, VKI_SIGSTOP);
440 VG_(sigdelset)(&mask, VKI_SIGKILL);
442 VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
443 #endif
444 }
446 static void os_state_clear(ThreadState *tst)
447 {
448 tst->os_state.lwpid = 0;
449 tst->os_state.threadgroup = 0;
450 # if defined(VGO_linux)
451 /* no other fields to clear */
452 # elif defined(VGO_aix5)
453 tst->os_state.cancel_async = False;
454 tst->os_state.cancel_disabled = False;
455 tst->os_state.cancel_progress = Canc_NoRequest;
456 # elif defined(VGO_darwin)
457 tst->os_state.post_mach_trap_fn = NULL;
458 tst->os_state.pthread = 0;
459 tst->os_state.func_arg = 0;
460 VG_(memset)(&tst->os_state.child_go, 0, sizeof(tst->os_state.child_go));
461 VG_(memset)(&tst->os_state.child_done, 0, sizeof(tst->os_state.child_done));
462 tst->os_state.wq_jmpbuf_valid = False;
463 tst->os_state.remote_port = 0;
464 tst->os_state.msgh_id = 0;
465 VG_(memset)(&tst->os_state.mach_args, 0, sizeof(tst->os_state.mach_args));
466 # elif defined(VGO_l4re)
467 tst->os_state.utcb = 0;
468 # endif
469 }
473 static void os_state_init(ThreadState *tst)
474 {
475 tst->os_state.valgrind_stack_base = 0;
476 tst->os_state.valgrind_stack_init_SP = 0;
477 os_state_clear(tst);
478 }
480 static
481 void mostly_clear_thread_record ( ThreadId tid )
482 {
483 #if !defined(VGO_l4re)
484 vki_sigset_t savedmask;
485 #endif
487 vg_assert(tid >= 0 && tid < VG_N_THREADS);
488 VG_(cleanup_thread)(&VG_(threads)[tid].arch);
489 VG_(threads)[tid].tid = tid;
491 /* Leave the thread in Zombie, so that it doesn't get reallocated
492 until the caller is finally done with the thread stack. */
493 VG_(threads)[tid].status = VgTs_Zombie;
495 #if !defined(VGO_l4re)
496 VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
497 VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);
498 #endif
500 os_state_clear(&VG_(threads)[tid]);
502 /* start with no altstack */
503 VG_(threads)[tid].altstack.ss_sp = (void *)0xdeadbeef;
504 VG_(threads)[tid].altstack.ss_size = 0;
505 VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
507 #if !defined(VGO_l4re)
508 VG_(clear_out_queued_signals)(tid, &savedmask);
509 #endif
511 VG_(threads)[tid].sched_jmpbuf_valid = False;
512 }
515 Called in the child after fork. If the parent has multiple
516 threads, then we've inherited a VG_(threads) array describing them,
517 but only the thread which called fork() is actually alive in the
518 child. This function needs to clean up all those other thread
519 structures.
521 Whichever tid in the parent called fork() becomes the
522 master_tid in the child. That's because the only living slot in
523 VG_(threads) in the child after fork is VG_(threads)[tid], and it
524 would be too hard to try to re-number the thread and relocate the
525 thread state down to VG_(threads)[1].
527 This function also needs to reinitialize the_BigLock, since
528 otherwise we may end up sharing its state with the parent, which
529 would be deeply confusing.
531 static void sched_fork_cleanup(ThreadId me)
532 {
533 ThreadId tid;
534 vg_assert(VG_(running_tid) == me);
536 # if defined(VGO_darwin)
537 // GrP fixme hack reset Mach ports
538 VG_(mach_init)();
539 # endif
541 VG_(threads)[me].os_state.lwpid = VG_(gettid)();
542 VG_(threads)[me].os_state.threadgroup = VG_(getpid)();
544 /* clear out all the unused thread slots */
545 for (tid = 1; tid < VG_N_THREADS; tid++) {
546 if (tid != me) {
547 mostly_clear_thread_record(tid);
548 VG_(threads)[tid].status = VgTs_Empty;
549 VG_(clear_syscallInfo)(tid);
550 }
551 }
553 /* re-init and take the sema */
554 ML_(sema_deinit)(&the_BigLock);
555 ML_(sema_init)(&the_BigLock);
556 ML_(sema_down)(&the_BigLock, False/*not LL*/);
557 }
560 /* First phase of initialisation of the scheduler. Initialise the
561 bigLock, zeroise the VG_(threads) structure and decide on the
562 ThreadId of the root thread.
564 ThreadId VG_(scheduler_init_phase1) ( void )
565 {
566 Int i;
567 ThreadId tid_main;
569 VG_(debugLog)(1,"sched","sched_init_phase1\n");
571 ML_(sema_init)(&the_BigLock);
573 for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
574 /* Paranoia .. completely zero it out. */
575 VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );
577 VG_(threads)[i].sig_queue = NULL;
579 os_state_init(&VG_(threads)[i]);
580 mostly_clear_thread_record(i);
582 VG_(threads)[i].status = VgTs_Empty;
583 VG_(threads)[i].client_stack_szB = 0;
584 VG_(threads)[i].client_stack_highest_word = (Addr)NULL;
587 tid_main = VG_(alloc_ThreadState)();
589 /* Bleh. Unfortunately there are various places in the system that
590 assume that the main thread has a ThreadId of 1.
591 - Helgrind (possibly)
592 - stack overflow message in default_action() in m_signals.c
593 - definitely a lot more places
595 vg_assert(tid_main == 1);
596 return tid_main;
597 }
601 /* Second phase of initialisation of the scheduler. Given the root
602 ThreadId computed by first phase of initialisation, fill in stack
603 details and acquire bigLock. Initialise the scheduler. This is
604 called at startup. The caller subsequently initialises the guest
605 state components of this main thread.
607 void VG_(scheduler_init_phase2) ( ThreadId tid_main,
608 Addr clstack_end,
609 SizeT clstack_size )
610 {
611 VG_(debugLog)(1,"sched","sched_init_phase2: tid_main=%d, "
612 "cls_end=0x%lx, cls_sz=%ld\n",
613 tid_main, clstack_end, clstack_size);
615 vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
616 vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));
618 VG_(threads)[tid_main].client_stack_highest_word
619 = clstack_end + 1 - sizeof(UWord);
620 VG_(threads)[tid_main].client_stack_szB
621 = clstack_size;
623 VG_(atfork)(NULL, NULL, sched_fork_cleanup);
624 }
627 /* ---------------------------------------------------------------------
628 Helpers for running translations.
629 ------------------------------------------------------------------ */
631 /* Use gcc's built-in setjmp/longjmp. longjmp must not restore signal
632 mask state, but does need to pass "val" through. */
633 #define SCHEDSETJMP(tid, jumped, stmt) \
634 do { \
635 ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid); \
637 (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf); \
638 if ((jumped) == 0) { \
639 vg_assert(!_qq_tst->sched_jmpbuf_valid); \
640 _qq_tst->sched_jmpbuf_valid = True; \
641 stmt; \
642 } else if (VG_(clo_trace_sched)) \
643 VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", \
644 __LINE__, tid, jumped); \
645 vg_assert(_qq_tst->sched_jmpbuf_valid); \
646 _qq_tst->sched_jmpbuf_valid = False; \
647 } while(0)
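/* Usage sketch (mirrors the call in handle_syscall() below): run a
   client syscall under the scheduler's setjmp, so that a fault taken
   while it runs longjmps back here instead of killing the process. */
#if 0
{
   volatile UInt jumped;
   SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
   if (jumped != 0) {
      /* A signal arrived and the handler longjmp'd; the statement
         did not complete normally. */
   }
}
#endif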
650 /* Do various guest state alignment checks prior to running a thread.
651 Specifically, check that what we have matches Vex's guest state
652 layout requirements. See libvex.h for details, but in short the
653 requirements are: There must be no holes in between the primary
654 guest state, its two copies, and the spill area. In short, all 4
655 areas must have a 16-aligned size and be 16-aligned, and be placed
656 back-to-back. */
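/* Layout sketch (illustrative, derived from the asserts below): the
   four areas sit back-to-back inside ThreadArchState, so a tool can
   reach a shadow register purely by offsetting from the primary
   state:

      +0*sz_vex  tst->arch.vex          (primary guest state)
      +1*sz_vex  tst->arch.vex_shadow1
      +2*sz_vex  tst->arch.vex_shadow2
      +3*sz_vex  tst->arch.vex_spill

   e.g. shadow1 of a register at offset o lives at (Addr)&vex + sz_vex + o. */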
657 static void do_pre_run_checks ( ThreadState* tst )
658 {
659 Addr a_vex = (Addr) & tst->arch.vex;
660 Addr a_vexsh1 = (Addr) & tst->arch.vex_shadow1;
661 Addr a_vexsh2 = (Addr) & tst->arch.vex_shadow2;
662 Addr a_spill = (Addr) & tst->arch.vex_spill;
663 UInt sz_vex = (UInt) sizeof tst->arch.vex;
664 UInt sz_vexsh1 = (UInt) sizeof tst->arch.vex_shadow1;
665 UInt sz_vexsh2 = (UInt) sizeof tst->arch.vex_shadow2;
666 UInt sz_spill = (UInt) sizeof tst->arch.vex_spill;
669 VG_(printf)("gst %p %d, sh1 %p %d, "
670 "sh2 %p %d, spill %p %d\n",
671 (void*)a_vex, sz_vex,
672 (void*)a_vexsh1, sz_vexsh1,
673 (void*)a_vexsh2, sz_vexsh2,
674 (void*)a_spill, sz_spill );
676 vg_assert(VG_IS_16_ALIGNED(sz_vex));
677 vg_assert(VG_IS_16_ALIGNED(sz_vexsh1));
678 vg_assert(VG_IS_16_ALIGNED(sz_vexsh2));
679 vg_assert(VG_IS_16_ALIGNED(sz_spill));
681 vg_assert(VG_IS_16_ALIGNED(a_vex));
682 vg_assert(VG_IS_16_ALIGNED(a_vexsh1));
683 vg_assert(VG_IS_16_ALIGNED(a_vexsh2));
684 vg_assert(VG_IS_16_ALIGNED(a_spill));
686 /* Check that the guest state and its two shadows have the same
687 size, and that there are no holes in between. The latter is
688 important because Memcheck assumes that it can reliably access
689 the shadows by indexing off a pointer to the start of the
690 primary guest state area. */
691 vg_assert(sz_vex == sz_vexsh1);
692 vg_assert(sz_vex == sz_vexsh2);
693 vg_assert(a_vex + 1 * sz_vex == a_vexsh1);
694 vg_assert(a_vex + 2 * sz_vex == a_vexsh2);
695 /* Also check there's no hole between the second shadow area and
697 vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
698 vg_assert(a_vex + 3 * sz_vex == a_spill);
700 # if defined(VGA_amd64)
701 /* x86/amd64 XMM regs must form an array, ie, have no
702 holes in between. */
703 vg_assert(
704 (offsetof(VexGuestAMD64State,guest_XMM16)
705 - offsetof(VexGuestAMD64State,guest_XMM0))
706 == (17/*#regs*/-1) * 16/*bytes per reg*/
707 );
708 # endif
710 # if defined(VGA_ppc32) || defined(VGA_ppc64)
711 /* ppc guest_state vector regs must be 16 byte aligned for
712 loads/stores. This is important! */
713 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0));
714 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0));
715 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0));
716 /* be extra paranoid .. */
717 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1));
718 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1));
719 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1));
720 # endif
722 # if defined(VGA_arm)
723 /* arm guest_state VFP regs must be 8 byte aligned for
725 vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D0));
726 vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
727 vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
728 /* be extra paranoid .. */
729 vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
730 vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
731 vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
732 # endif
734 # if defined(VGA_s390x)
735 /* no special requirements */
736 # endif
737 }
739 // NO_VGDB_POLL value ensures vgdb is not polled, while
740 // VGDB_POLL_ASAP ensures that the next scheduler call
741 // will cause a poll.
742 #define NO_VGDB_POLL 0xffffffffffffffffULL
743 #define VGDB_POLL_ASAP 0x0ULL
745 void VG_(disable_vgdb_poll) (void )
746 {
747 vgdb_next_poll = NO_VGDB_POLL;
748 }
749 void VG_(force_vgdb_poll) ( void )
750 {
751 vgdb_next_poll = VGDB_POLL_ASAP;
752 }
754 /* Run the thread tid for a while, and return a VG_TRC_* value
755 indicating why VG_(run_innerloop) stopped. */
756 static UInt run_thread_for_a_while ( ThreadId tid )
757 {
758 volatile Int jumped;
759 volatile ThreadState* tst = NULL; /* stop gcc complaining */
760 volatile UInt trc = 0;
761 volatile Int dispatch_ctr_SAVED;
762 volatile Int done_this_time;
765 vg_assert(VG_(is_valid_tid)(tid));
766 vg_assert(VG_(is_running_thread)(tid));
767 vg_assert(!VG_(is_exiting)(tid));
769 tst = VG_(get_ThreadState)(tid);
770 do_pre_run_checks( (ThreadState*)tst );
774 dispatch_ctr_SAVED = VG_(dispatch_ctr);
776 # if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
777 /* On AIX, we need to get a plausible value for SPRG3 for this
778 thread, since it's used I think as a thread-state pointer. It
779 is presumably set by the kernel for each dispatched thread and
780 cannot be changed by user space. It therefore seems safe enough
781 to copy the host's value of it into the guest state at the point
782 the thread is dispatched.
783 (Later): Hmm, looks like SPRG3 is only used in 32-bit mode.
786 __asm__ __volatile__( "mfspr %0,259\n" : "=b"(host_sprg3) );
787 VG_(threads)[tid].arch.vex.guest_SPRG3_RO = host_sprg3;
788 vg_assert(sizeof(VG_(threads)[tid].arch.vex.guest_SPRG3_RO) == sizeof(void*));
792 /* there should be no undealt-with signals */
793 //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
795 if (0) {
796 vki_sigset_t m;
797 Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
798 vg_assert(err == 0);
799 VG_(printf)("tid %d: entering code with unblocked signals: ", tid);
800 for (i = 1; i <= _VKI_NSIG; i++)
801 if (!VG_(sigismember)(&m, i))
802 VG_(printf)("%d ", i);
806 // Tell the tool this thread is about to run client code
807 VG_TRACK( start_client_code, tid, bbs_done );
809 vg_assert(VG_(in_generated_code) == False);
810 VG_(in_generated_code) = True;
811 #if defined(VGO_l4re)
812 #if defined(L4RE_DEBUG_EXECUTION)
813 //VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
814 VG_(debugLog)(0, "sched", "bbs_done=%lld ip = %p guest ip = %p\n",
815 bbs_done, (void *)VG_(get_IP)(tid),
816 (void*)&tst->arch.vex.guest_EIP);
818 SCHEDSETJMP(
819 tid,
820 jumped,
822 trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
823 VG_(clo_profile_flags) > 0 ? 1 : 0 )
824 );
826 vg_assert(VG_(in_generated_code) == True);
827 VG_(in_generated_code) = False;
829 if (jumped) {
830 /* We get here if the client took a fault that caused our signal
831 handler to longjmp. */
833 trc = VG_TRC_FAULT_SIGNAL;
834 #if !defined(VGO_l4re)
835 block_signals();
836 #endif
837 }
839 done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;
841 vg_assert(done_this_time >= 0);
842 bbs_done += (ULong)done_this_time;
844 // Tell the tool this thread has stopped running client code
845 VG_TRACK( stop_client_code, tid, bbs_done );
847 if (bbs_done >= vgdb_next_poll) {
848 if (VG_(clo_vgdb_poll))
849 vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
850 else
851 /* value was changed due to gdbserver invocation via ptrace */
852 vgdb_next_poll = NO_VGDB_POLL;
853 if (VG_(gdbserver_activity) (tid))
854 VG_(gdbserver) (tid);
855 }
856 return trc;
857 }
861 /* Run a no-redir translation just once, and return the resulting
862 VG_TRC_* value. */
863 static UInt run_noredir_translation ( Addr hcode, ThreadId tid )
864 {
865 volatile Int jumped;
866 volatile ThreadState* tst;
867 volatile UWord argblock[4];
868 volatile UInt retval;
871 vg_assert(VG_(is_valid_tid)(tid));
872 vg_assert(VG_(is_running_thread)(tid));
873 vg_assert(!VG_(is_exiting)(tid));
875 tst = VG_(get_ThreadState)(tid);
876 do_pre_run_checks( (ThreadState*)tst );
879 # if defined(VGA_ppc32) || defined(VGA_ppc64)
880 /* I don't think we need to clear this thread's guest_RESVN here,
881 because we can only get here if run_thread_for_a_while() has
882 been used immediately before, on this same thread. */
885 /* There can be 3 outcomes from VG_(run_a_noredir_translation):
887 - a signal occurred and the sighandler longjmp'd. Then both [2]
888 and [3] are unchanged - hence zero.
890 - translation ran normally, set [2] (next guest IP) and set [3]
891 to whatever [1] was beforehand, indicating a normal (boring)
892 jump to the next block.
894 - translation ran normally, set [2] (next guest IP) and set [3]
895 to something different from [1] beforehand, which indicates a
896 TRC_ value. */
898 argblock[0] = (UWord)hcode;
899 argblock[1] = (UWord)&VG_(threads)[tid].arch.vex;
900 argblock[2] = 0; /* next guest IP is written here */
901 argblock[3] = 0; /* guest state ptr afterwards is written here */
903 // Tell the tool this thread is about to run client code
904 VG_TRACK( start_client_code, tid, bbs_done );
906 vg_assert(VG_(in_generated_code) == False);
907 VG_(in_generated_code) = True;
909 SCHEDSETJMP(
910 tid,
911 jumped,
912 VG_(run_a_noredir_translation)( &argblock[0] )
913 );
915 VG_(in_generated_code) = False;
917 if (jumped) {
918 /* We get here if the client took a fault that caused our signal
919 handler to longjmp. */
920 vg_assert(argblock[2] == 0); /* next guest IP was not written */
921 vg_assert(argblock[3] == 0); /* trc was not written */
922 #if !defined(VGO_l4re)
923 block_signals();
924 #endif
925 retval = VG_TRC_FAULT_SIGNAL;
926 } else {
927 /* store away the guest program counter */
928 VG_(set_IP)( tid, argblock[2] );
929 if (argblock[3] == argblock[1])
930 /* the guest state pointer afterwards was unchanged */
931 retval = VG_TRC_BORING;
932 else
933 retval = (UInt)argblock[3];
934 }
938 // Tell the tool this thread has stopped running client code
939 VG_TRACK( stop_client_code, tid, bbs_done );
940 return retval;
941 }
944 ULong VG_(bbs_done) (void)
945 {
946 return bbs_done;
947 }
950 /* ---------------------------------------------------------------------
951 The scheduler proper.
952 ------------------------------------------------------------------ */
954 static void handle_tt_miss ( ThreadId tid )
955 {
956 Bool found;
957 Addr ip = VG_(get_IP)(tid);
959 /* Trivial event. Miss in the fast-cache. Do a full
960 lookup for it. */
961 found = VG_(search_transtab)( NULL, ip, True/*upd_fast_cache*/ );
962 if (UNLIKELY(!found)) {
963 #if defined(VGO_l4re)
964 #if defined(L4RE_DEBUG_EXECUTION)
965 //VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
966 VG_(debugLog)(0, "sched","tid=%d, ip=%p, bbs_done=%lld\n", tid, (void *)ip, bbs_done);
967 // (VG_(translate)( tid, ip, /*debug*/True, 1 /*0xffffffff*/ /*0*//* verbose*/,
968 // bbs_done, True/*allow redirection*/ ));
969 #endif
970 #endif
971 /* Not found; we need to request a translation. */
972 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
973 bbs_done, True/*allow redirection*/ )) {
974 found = VG_(search_transtab)( NULL, ip, True );
975 vg_assert2(found, "VG_TRC_INNER_FASTMISS: missing tt_fast entry");
976 } else {
978 // If VG_(translate)() fails, it's because it had to throw a
979 // signal because the client jumped to a bad address. That
980 // means that either a signal has been set up for delivery,
981 // or the thread has been marked for termination. Either
982 // way, we just need to go back into the scheduler loop.
983 }
984 }
985 }
987 static void handle_syscall(ThreadId tid, UInt trc)
988 {
989 ThreadState * volatile tst = VG_(get_ThreadState)(tid);
990 volatile UInt jumped;
992 /* Syscall may or may not block; either way, it will be
993 complete by the time this call returns, and we'll be
994 runnable again. We could take a signal while the
995 syscall is ongoing. */
997 if (VG_(clo_sanity_level) >= 3)
998 VG_(am_do_sync_check)("(BEFORE SYSCALL)",__FILE__,__LINE__);
1000 SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
1002 if (VG_(clo_sanity_level) >= 3)
1003 VG_(am_do_sync_check)("(AFTER SYSCALL)",__FILE__,__LINE__);
1005 if (!VG_(is_running_thread)(tid))
1006 VG_(printf)("tid %d not running; VG_(running_tid)=%d, tid %d status %d\n",
1007 tid, VG_(running_tid), tid, tst->status);
1008 vg_assert(VG_(is_running_thread)(tid));
1010 #if !defined(VGO_l4re)
1011 if (jumped) {
1012 block_signals();
1013 VG_(poll_signals)(tid);
1014 }
1015 #endif
1016 }
1018 /* tid just requested a jump to the noredir version of its current
1019 program counter. So make up that translation if needed, run it,
1020 and return the resulting thread return code. */
1021 static UInt/*trc*/ handle_noredir_jump ( ThreadId tid )
1022 {
1023 UWord hcode = 0;
1024 Addr ip = VG_(get_IP)(tid);
1026 Bool found = VG_(search_unredir_transtab)( &hcode, ip );
1027 if (!found) {
1028 /* Not found; we need to request a translation. */
1029 if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
1030 False/*NO REDIRECTION*/ )) {
1032 found = VG_(search_unredir_transtab)( &hcode, ip );
1033 vg_assert2(found, "unredir translation missing after creation?!");
1034 } else {
1036 // If VG_(translate)() fails, it's because it had to throw a
1037 // signal because the client jumped to a bad address. That
1038 // means that either a signal has been set up for delivery,
1039 // or the thread has been marked for termination. Either
1040 // way, we just need to go back into the scheduler loop.
1041 return VG_TRC_BORING;
1042 }
1043 }
1047 vg_assert(hcode != 0);
1049 /* Otherwise run it and return the resulting VG_TRC_* value. */
1050 return run_noredir_translation( hcode, tid );
1051 }
1055 Run a thread until it wants to exit.
1057 We assume that the caller has already called VG_(acquire_BigLock) for
1058 us, so we own the VCPU. Also, all signals are blocked.
1060 VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
1061 {
1062 UInt trc;
1063 ThreadState *tst = VG_(get_ThreadState)(tid);
1064 static Bool vgdb_startup_action_done = False;
1067 if (VG_(clo_trace_sched))
1068 print_sched_event(tid, "entering VG_(scheduler)");
1070 /* Do vgdb initialization (but once). Only the first (main) task
1071 starting up will do the below.
1072 Initializing gdbserver earlier than at the first
1073 thread's VG_(scheduler) call causes problems:
1074 * at the end of VG_(scheduler_init_phase2) :
1075 The main thread is in VgTs_Init state, but in a not yet
1076 consistent state => the thread cannot be reported to gdb
1077 (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
1078 back the guest registers to gdb).
1079 * at end of valgrind_main, just
1080 before VG_(main_thread_wrapper_NORETURN)(1) :
1081 The main thread is still in VgTs_Init state but in a
1082 more advanced state. However, the thread state is not yet
1083 completely initialized : a.o., the os_state is not yet fully
1084 set => the thread is then not properly reported to gdb,
1085 which is then confused (causing e.g. a duplicate thread to be
1086 shown, without thread id).
1087 * it would be possible to initialize gdbserver "lower" in the
1088 call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
1089 these are platform dependent and the place at which
1090 the thread state is completely initialized is not
1091 specific anymore to the main thread (so a similar "do it only
1092 once" would be needed).
1094 => a "once only" initialization here is the best compromise. */
1095 if (!vgdb_startup_action_done) {
1096 vg_assert(tid == 1); // it must be the main thread.
1097 vgdb_startup_action_done = True;
1098 if (VG_(clo_vgdb) != Vg_VgdbNo) {
1099 /* If we have to poll, ensures we do an initial poll at first
1100 scheduler call. Otherwise, ensure no poll (unless interrupted
1101 by ptrace). */
1102 if (VG_(clo_vgdb_poll))
1103 VG_(force_vgdb_poll) ();
1105 VG_(disable_vgdb_poll) ();
1107 vg_assert (VG_(dyn_vgdb_error) == VG_(clo_vgdb_error));
1108 /* As we are initializing, VG_(dyn_vgdb_error) can't have been
1111 VG_(gdbserver_prerun_action) (1);
1113 VG_(disable_vgdb_poll) ();
1117 #if !defined(VGO_l4re)
1118 /* set the proper running signal mask */
1119 block_signals();
1120 # endif
1122 vg_assert(VG_(is_running_thread)(tid));
1124 VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;
1126 while (!VG_(is_exiting)(tid)) {
1128 if (VG_(dispatch_ctr) == 1) {
1130 # if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
1131 /* Note: count runnable threads before dropping The Lock. */
1132 Int rt = VG_(count_runnable_threads)();
1135 /* Our slice is done, so yield the CPU to another thread. On
1136 Linux this doesn't involve an actual sleep, since that would
1137 take too much time; we simply release and re-take the lock. On
1138 AIX, we have to prod the scheduler to get it to consider other
1139 threads; not doing so appears to cause very long delays before
1140 other runnable threads get rescheduled. */
1142 /* 4 July 06: it seems that a zero-length nsleep is needed to
1143 cause async thread cancellation (canceller.c) to terminate
1144 in finite time; else it is in some kind of race/starvation
1145 situation and completion is arbitrarily delayed (although
1146 this is not a deadlock).
1148 Unfortunately these sleeps cause MPI jobs not to terminate
1149 sometimes (some kind of livelock). So sleeping once
1150 every N opportunities appears to work. */
1152 /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
1153 sys_yield also helps the problem, whilst not crashing apps. */
1155 VG_(release_BigLock)(tid, VgTs_Yielding,
1156 "VG_(scheduler):timeslice");
1157 /* ------------ now we don't have The Lock ------------ */
1159 # if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
1161 vg_assert(__NR_AIX5__nsleep != __NR_AIX5_UNKNOWN);
1162 vg_assert(__NR_AIX5_yield != __NR_AIX5_UNKNOWN);
1163 if (1 && rt > 0 && ((++ctr % 3) == 0)) {
1164 //struct vki_timespec ts;
1166 //ts.tv_nsec = 0*1000*1000;
1167 //VG_(do_syscall2)(__NR_AIX5__nsleep, (UWord)&ts, (UWord)NULL);
1168 VG_(do_syscall0)(__NR_AIX5_yield);
1173 VG_(acquire_BigLock)(tid, "VG_(scheduler):timeslice");
1174 /* ------------ now we do have The Lock ------------ */
1176 /* OK, do some relatively expensive housekeeping stuff */
1177 scheduler_sanity(tid);
1178 VG_(sanity_check_general)(False);
1180 /* Look for any pending signals for this thread, and set them up
1182 #if !defined(VGO_l4re)
1183 VG_(poll_signals)(tid);
1184 #endif
1186 if (VG_(is_exiting)(tid))
1187 break; /* poll_signals picked up a fatal signal */
1189 /* For stats purposes only. */
1190 n_scheduling_events_MAJOR++;
1192 /* Figure out how many bbs to ask vg_run_innerloop to do. Note
1193 that it decrements the counter before testing it for zero, so
1194 that if tst->dispatch_ctr is set to N you get at most N-1
1195 iterations. Also this means that tst->dispatch_ctr must
1196 exceed zero before entering the innerloop. Also also, the
1197 decrement is done before the bb is actually run, so you
1198 always get at least one decrement even if nothing happens. */
1199 VG_(dispatch_ctr) = SCHEDULING_QUANTUM + 1;
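/* Sketch of the counting convention just described (a hypothetical
   stand-in for the assembly inner loop): */
#if 0
while (True) {
   if (--VG_(dispatch_ctr) == 0)
      break;                 /* tested after decrement, hence N-1 runs */
   run_one_translation();    /* hypothetical placeholder */
}
#endif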
1202 vg_assert(tst->tid == tid);
1203 vg_assert(tst->os_state.lwpid == VG_(gettid)());
1204 }
1206 /* For stats purposes only. */
1207 n_scheduling_events_MINOR++;
1209 if (0)
1210 VG_(message)(Vg_DebugMsg, "thread %d: running for %d bbs\n",
1211 tid, VG_(dispatch_ctr) - 1 );
1213 trc = run_thread_for_a_while ( tid );
1215 if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
1216 Char buf[50];
1217 VG_(sprintf)(buf, "TRC: %s", name_of_sched_event(trc));
1218 print_sched_event(tid, buf);
1221 if (trc == VEX_TRC_JMP_NOREDIR) {
1222 /* If we got a request to run a no-redir version of
1223 something, do so now -- handle_noredir_jump just (creates
1224 and) runs that one translation. The flip side is that the
1225 noredir translation can't itself return another noredir
1226 request -- that would be nonsensical. It can, however,
1227 return VG_TRC_BORING, which just means keep going as
1229 trc = handle_noredir_jump(tid);
1230 vg_assert(trc != VEX_TRC_JMP_NOREDIR);
1231 }
1233 switch (trc) {
1234 case VG_TRC_BORING:
1235 /* no special event, just keep going. */
1236 break;
1238 case VG_TRC_INNER_FASTMISS:
1239 vg_assert(VG_(dispatch_ctr) > 1);
1240 handle_tt_miss(tid);
1241 break;
1243 case VEX_TRC_JMP_CLIENTREQ:
1244 do_client_request(tid);
1245 break;
1247 #if defined(VGO_l4re)
1251 #define DEBUG_UTCB \
1253 VG_(debugLog)(0, "sched", "utcb access via client " \
1254 "virtual utcb of thread %d @ %p -> eax\n", \
1256 (Addr)ts_utcb(&VG_(threads)[tid]));
1258 #define TRACK_UTCB \
1259 VG_TRACK( new_mem_startup, (Addr)ts_utcb(&VG_(threads)[tid]), L4RE_UTCB_SIZE, 1, 1, 1, 0 );
1261 case VEX_TRC_JMP_L4_UTCB_EAX:
1262 VG_(threads)[tid].arch.vex.guest_EAX = (Addr)ts_utcb(&VG_(threads)[tid]);
1266 case VEX_TRC_JMP_L4_UTCB_EBX:
1267 VG_(threads)[tid].arch.vex.guest_EBX = (Addr)ts_utcb(&VG_(threads)[tid]);
1271 case VEX_TRC_JMP_L4_UTCB_ECX:
1272 VG_(threads)[tid].arch.vex.guest_ECX = (Addr)ts_utcb(&VG_(threads)[tid]);
1276 case VEX_TRC_JMP_L4_UTCB_EDX:
1277 VG_(threads)[tid].arch.vex.guest_EDX = (Addr)ts_utcb(&VG_(threads)[tid]);
1281 case VEX_TRC_JMP_L4_UTCB_EDI:
1282 VG_(threads)[tid].arch.vex.guest_EDI = (Addr)ts_utcb(&VG_(threads)[tid]);
1286 case VEX_TRC_JMP_L4_UTCB_ESI:
1287 VG_(threads)[tid].arch.vex.guest_ESI = (Addr)ts_utcb(&VG_(threads)[tid]);
1291 case VEX_TRC_JMP_SYS_INT48: /* L4Re: Invoke */
1292 case VEX_TRC_JMP_SYS_INT50: /* L4Re: Debug */
1293 case VEX_TRC_JMP_SYS_INT128: /* L4Re/UX: INT80 */
1294 case VEX_TRC_JMP_L4_UD2: /* L4Re: UD2 */
1295 case VEX_TRC_JMP_L4_ARTIFICIAL: /* L4Re: artificial trap */
1296 handle_syscall(tid, trc);
1297 if (VG_(clo_sanity_level) > 2)
1298 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1299 break;
1300 #else
1301 case VEX_TRC_JMP_SYS_INT128: /* x86-linux */
1302 case VEX_TRC_JMP_SYS_INT129: /* x86-darwin */
1303 case VEX_TRC_JMP_SYS_INT130: /* x86-darwin */
1304 case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux, amd64-darwin */
1305 handle_syscall(tid, trc);
1306 if (VG_(clo_sanity_level) > 2)
1307 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1308 break;
1309 #endif
1311 case VEX_TRC_JMP_YIELD:
1312 /* Explicit yield, because this thread is in a spin-lock
1313 or something. Only let the thread run for a short while
1314 longer. Because swapping to another thread is expensive,
1315 we're prepared to let this thread eat a little more CPU
1316 before swapping to another. That means that short term
1317 spins waiting for hardware to poke memory won't cause a
1319 if (VG_(dispatch_ctr) > 2000)
1320 VG_(dispatch_ctr) = 2000;
1321 break;
1323 case VG_TRC_INNER_COUNTERZERO:
1324 /* Timeslice is out. Let a new thread be scheduled. */
1325 vg_assert(VG_(dispatch_ctr) == 1);
1326 break;
1328 case VG_TRC_FAULT_SIGNAL:
1329 /* Everything should be set up (either we're exiting, or
1330 about to start in a signal handler). */
1331 break;
1333 case VEX_TRC_JMP_MAPFAIL:
1334 /* Failure of arch-specific address translation (x86/amd64
1335 segment override use) */
1336 /* jrs 2005 03 11: is this correct? */
1337 VG_(message)(Vg_DebugMsg, "TRC_JMP_MAPFAIL\n");
1338 VG_(synth_fault)(tid);
1339 break;
1341 case VEX_TRC_JMP_EMWARN: {
1342 static Int counts[EmWarn_NUMBER];
1343 static Bool counts_initted = False;
1344 VexEmWarn ew;
1345 Char* what;
1346 Bool show;
1347 Int q;
1348 if (!counts_initted) {
1349 counts_initted = True;
1350 for (q = 0; q < EmWarn_NUMBER; q++)
1351 counts[q] = 0;
1352 }
1353 ew = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
1354 what = (ew < 0 || ew >= EmWarn_NUMBER)
1355 ? "unknown (?!)"
1356 : LibVEX_EmWarn_string(ew);
1357 show = (ew < 0 || ew >= EmWarn_NUMBER)
1358 ? True
1359 : counts[ew]++ < 3;
1360 if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
1361 VG_(message)( Vg_UserMsg,
1362 "Emulation warning: unsupported action:\n");
1363 VG_(message)( Vg_UserMsg, " %s\n", what);
1364 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1365 }
1366 break;
1367 }
1369 case VEX_TRC_JMP_EMFAIL: {
1370 VexEmWarn ew;
1371 Char* what;
1372 ew = (VexEmWarn)VG_(threads)[tid].arch.vex.guest_EMWARN;
1373 what = (ew < 0 || ew >= EmWarn_NUMBER)
1374 ? "unknown (?!)"
1375 : LibVEX_EmWarn_string(ew);
1376 VG_(message)( Vg_UserMsg,
1377 "Emulation fatal error -- Valgrind cannot continue:\n");
1378 VG_(message)( Vg_UserMsg, " %s\n", what);
1379 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1380 VG_(message)(Vg_UserMsg, "\n");
1381 VG_(message)(Vg_UserMsg, "Valgrind has to exit now. Sorry.\n");
1382 VG_(message)(Vg_UserMsg, "\n");
1383 VG_(exit)(1);
1384 }
1387 case VEX_TRC_JMP_SIGTRAP:
1388 #if defined(VGO_l4re)
1389 /* In case of l4re int 0x3 means enter_kdebug().*/
1390 handle_syscall(tid, trc);
1392 if (VG_(clo_sanity_level) > 2)
1393 VG_(sanity_check_general)(True); /* sanity-check every syscall */
1394 #else
1395 VG_(synth_sigtrap)(tid);
1396 #endif
1397 break;
1399 case VEX_TRC_JMP_SIGSEGV:
1400 VG_(message)(Vg_DebugMsg, "TRC_JMP_SIGSEGV\n");
1401 VG_(synth_fault)(tid);
1402 break;
1404 case VEX_TRC_JMP_SIGBUS:
1405 VG_(synth_sigbus)(tid);
1406 break;
1408 case VEX_TRC_JMP_NODECODE:
1410 "valgrind: Unrecognised instruction at address %#lx.\n",
1412 VG_(get_and_pp_StackTrace)(tid, 50);
1413 #define M(a) VG_(umsg)(a "\n");
1414 M("Your program just tried to execute an instruction that Valgrind" );
1415 M("did not recognise. There are two possible reasons for this." );
1416 M("1. Your program has a bug and erroneously jumped to a non-code" );
1417 M(" location. If you are running Memcheck and you just saw a" );
1418 M(" warning about a bad jump, it's probably your program's fault.");
1419 M("2. The instruction is legitimate but Valgrind doesn't handle it,");
1420 M(" i.e. it's Valgrind's fault. If you think this is the case or");
1421 M(" you are not sure, please let us know and we'll try to fix it.");
1422 M("Either way, Valgrind will now raise a SIGILL signal which will" );
1423 M("probably kill your program." );
1424 #undef M
1425 VG_(synth_sigill)(tid, VG_(get_IP)(tid));
1426 break;
1428 case VEX_TRC_JMP_TINVAL:
1429 VG_(discard_translations)(
1430 (Addr64)VG_(threads)[tid].arch.vex.guest_TISTART,
1431 VG_(threads)[tid].arch.vex.guest_TILEN,
1432 "scheduler(VEX_TRC_JMP_TINVAL)"
1435 VG_(printf)("dump translations done.\n");
1438 case VG_TRC_INVARIANT_FAILED:
1439 /* This typically happens if, after running generated code,
1440 it is detected that host CPU settings (eg, FPU/Vector
1441 control words) are not as they should be. Vex's code
1442 generation specifies the state such control words should
1443 be in on entry to Vex-generated code, and they should be
1444 unchanged on exit from it. Failure of this assertion
1445 usually means a bug in Vex's code generation. */
1447 // __asm__ __volatile__ (
1448 // "\t.word 0xEEF12A10\n" // fmrx r2,fpscr
1449 // "\tmov %0, r2" : "=r"(xx) : : "r2" );
1450 // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
1452 vg_assert2(0, "VG_(scheduler), phase 3: "
1453 "run_innerloop detected host "
1454 "state invariant failure", trc);
1456 case VEX_TRC_JMP_SYS_SYSENTER:
1457 /* Do whatever simulation is appropriate for an x86 sysenter
1458 instruction. Note that it is critical to set this thread's
1459 guest_EIP to point at the code to execute after the
1460 sysenter, since Vex-generated code will not have set it --
1461 vex does not know what it should be. Vex sets the next
1462 address to zero, so if you don't set guest_EIP, the thread
1463 will jump to zero afterwards and probably die as a result. */
1464 # if defined(VGP_x86_linux)
1465 vg_assert2(0, "VG_(scheduler), phase 3: "
1466 "sysenter_x86 on x86-linux is not supported");
1467 # elif defined(VGP_x86_darwin)
1468 /* return address in client edx */
1469 VG_(threads)[tid].arch.vex.guest_EIP
1470 = VG_(threads)[tid].arch.vex.guest_EDX;
1471 handle_syscall(tid, trc);
1472 # elif defined(VGP_x86_l4re)
1473 /* nearly the same. L4Re stores return EIP in %ebx */
1474 VG_(threads)[tid].arch.vex.guest_EIP
1475 = VG_(threads)[tid].arch.vex.guest_EBX;
1476 handle_syscall(tid, trc);
1477 # else
1478 vg_assert2(0, "VG_(scheduler), phase 3: "
1479 "sysenter_x86 on non-x86 platform?!?!");
1484 vg_assert2(0, "VG_(scheduler), phase 3: "
1485 "unexpected thread return code (%u)", trc);
1489 } /* switch (trc) */
1490 } /* while (!VG_(is_exiting)(tid)) */
1492 if (VG_(clo_trace_sched))
1493 print_sched_event(tid, "exiting VG_(scheduler)");
1495 vg_assert(VG_(is_exiting)(tid));
1497 return tst->exitreason;
1498 }
1502 This causes all threads to forcibly exit. They aren't actually
1503 dead by the time this returns; you need to call
1504 VG_(reap_threads)() to wait for them.
1506 void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
1510 vg_assert(VG_(is_running_thread)(me));
1512 for (tid = 1; tid < VG_N_THREADS; tid++) {
1513 if (tid == me
1514 || VG_(threads)[tid].status == VgTs_Empty)
1515 continue;
1518 "VG_(nuke_all_threads_except): nuking tid %d\n", tid);
1520 VG_(threads)[tid].exitreason = src;
1521 if (src == VgSrc_FatalSig)
1522 VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
1523 VG_(get_thread_out_of_syscall)(tid);
1524 }
1525 }
1528 /* ---------------------------------------------------------------------
1529 Specifying shadow register values
1530 ------------------------------------------------------------------ */
1532 #if defined(VGA_x86)
1533 # define VG_CLREQ_ARGS guest_EAX
1534 # define VG_CLREQ_RET guest_EDX
1535 #elif defined(VGA_amd64)
1536 # define VG_CLREQ_ARGS guest_RAX
1537 # define VG_CLREQ_RET guest_RDX
1538 #elif defined(VGA_ppc32) || defined(VGA_ppc64)
1539 # define VG_CLREQ_ARGS guest_GPR4
1540 # define VG_CLREQ_RET guest_GPR3
1541 #elif defined(VGA_arm)
1542 # define VG_CLREQ_ARGS guest_R4
1543 # define VG_CLREQ_RET guest_R3
1544 #elif defined (VGA_s390x)
1545 # define VG_CLREQ_ARGS guest_r2
1546 # define VG_CLREQ_RET guest_r3
1548 # error Unknown arch
1551 #define CLREQ_ARGS(regs) ((regs).vex.VG_CLREQ_ARGS)
1552 #define CLREQ_RET(regs) ((regs).vex.VG_CLREQ_RET)
1553 #define O_CLREQ_RET (offsetof(VexGuestArchState, VG_CLREQ_RET))
1555 // These macros write a value to a client's thread register, and tell the
1556 // tool that it's happened (if necessary).
1558 #define SET_CLREQ_RETVAL(zztid, zzval) \
1559 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1560 VG_TRACK( post_reg_write, \
1561 Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
1562 } while (0)
1564 #define SET_CLCALL_RETVAL(zztid, zzval, f) \
1565 do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
1566 VG_TRACK( post_reg_write_clientcall_return, \
1567 zztid, O_CLREQ_RET, sizeof(UWord), f); \
1568 } while (0)
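/* Usage sketch (it simply restates how the two macros are used by
   the request handlers below): SET_CLREQ_RETVAL completes an ordinary
   request; SET_CLCALL_RETVAL completes a CLIENT_CALL* request and
   additionally tells the tool which client function f produced the
   value. */
#if 0
UWord (*f)(ThreadId) = (void*)arg[1];
SET_CLCALL_RETVAL(tid, f(tid), (Addr)f);  /* client-call result */
SET_CLREQ_RETVAL(tid, 0);                 /* plain request result */
#endif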
1571 /* ---------------------------------------------------------------------
1572 Handle client requests.
1573 ------------------------------------------------------------------ */
1575 // OS-specific(?) client requests
1576 static Bool os_client_request(ThreadId tid, UWord *args)
1578 Bool handled = True;
1580 vg_assert(VG_(is_running_thread)(tid));
1582 switch(args[0]) {
1583 case VG_USERREQ__LIBC_FREERES_DONE:
1584 /* This is equivalent to an exit() syscall, but we don't set the
1585 exitcode (since it might already be set) */
1586 if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
1587 VG_(message)(Vg_DebugMsg,
1588 "__libc_freeres() done; really quitting!\n");
1589 VG_(threads)[tid].exitreason = VgSrc_ExitThread;
1590 break;
1594 default:
1595 handled = False;
1596 break;
1597 }
1598 return handled;
1599 }
1601 /* Do a client request for the thread tid. After the request, tid may
1602 or may not still be runnable; if not, the scheduler will have to
1603 choose a new thread to run.
1606 void do_client_request ( ThreadId tid )
1608 UWord* arg = (UWord*)(CLREQ_ARGS(VG_(threads)[tid].arch));
1609 UWord req_no = arg[0];
1612 VG_(printf)("req no = 0x%llx, arg = %p\n", (ULong)req_no, arg);
1615 case VG_USERREQ__CLIENT_CALL0: {
1616 UWord (*f)(ThreadId) = (void*)arg[1];
1618 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
1620 SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
1623 case VG_USERREQ__CLIENT_CALL1: {
1624 UWord (*f)(ThreadId, UWord) = (void*)arg[1];
1626 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
1628 SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
1631 case VG_USERREQ__CLIENT_CALL2: {
1632 UWord (*f)(ThreadId, UWord, UWord) = (void*)arg[1];
1634 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
1636 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
1639 case VG_USERREQ__CLIENT_CALL3: {
1640 UWord (*f)(ThreadId, UWord, UWord, UWord) = (void*)arg[1];
1642 VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
1644 SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
1648 // Nb: this looks like a circular definition, because it kind of is.
1649 // See comment in valgrind.h to understand what's going on.
1650 case VG_USERREQ__RUNNING_ON_VALGRIND:
1651 SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
1652 break;
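/* Client-side sketch (uses the public macro from valgrind.h): run
   natively the macro yields 0; under Valgrind the request above
   rewrites the result, making the nesting depth visible. */
#if 0
/* in client code: */
#include <valgrind/valgrind.h>
#include <stdio.h>
int main(void)
{
   if (RUNNING_ON_VALGRIND)
      printf("under Valgrind, nesting depth %d\n",
             (int)RUNNING_ON_VALGRIND);
   return 0;
}
#endif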
1654 case VG_USERREQ__PRINTF: {
1655 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1656 _VALIST_BY_REF version instead */
1657 if (sizeof(va_list) != sizeof(UWord))
1658 goto va_list_casting_error_NORETURN;
1659 union {
1660 va_list vargs;
1661 unsigned long uw;
1662 } u;
1663 u.uw = (unsigned long)arg[2];
1664 Int count =
1665 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], u.vargs );
1666 VG_(message_flush)();
1667 SET_CLREQ_RETVAL( tid, count );
1668 break;
1669 }
1671 case VG_USERREQ__PRINTF_BACKTRACE: {
1672 /* JRS 2010-Jan-28: this is DEPRECATED; use the
1673 _VALIST_BY_REF version instead */
1674 if (sizeof(va_list) != sizeof(UWord))
1675 goto va_list_casting_error_NORETURN;
1676 union {
1677 va_list vargs;
1678 unsigned long uw;
1679 } u;
1680 u.uw = (unsigned long)arg[2];
1681 Int count =
1682 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], u.vargs );
1683 VG_(message_flush)();
1684 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1685 SET_CLREQ_RETVAL( tid, count );
1686 break;
1687 }
1689 case VG_USERREQ__PRINTF_VALIST_BY_REF: {
1690 va_list* vargsp = (va_list*)arg[2];
1691 Int count =
1692 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], *vargsp );
1693 VG_(message_flush)();
1694 SET_CLREQ_RETVAL( tid, count );
1695 break;
1696 }
1698 case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
1699 va_list* vargsp = (va_list*)arg[2];
1700 Int count =
1701 VG_(vmessage)( Vg_ClientMsg, (char *)arg[1], *vargsp );
1702 VG_(message_flush)();
1703 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1704 SET_CLREQ_RETVAL( tid, count );
1705 break;
1706 }
1708 case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
1709 va_list* vargsp = (va_list*)arg[2];
1710 Int count =
1711 VG_(vmessage)( Vg_DebugMsg, (char *)arg[1], *vargsp );
1712 VG_(message_flush)();
1713 SET_CLREQ_RETVAL( tid, count );
1714 break;
1715 }
1717 case VG_USERREQ__ADD_IFUNC_TARGET: {
1718 VG_(redir_add_ifunc_target)( arg[1], arg[2] );
1719 SET_CLREQ_RETVAL( tid, 0);
1720 break;
1721 }
1722 case VG_USERREQ__STACK_REGISTER: {
1723 UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
1724 SET_CLREQ_RETVAL( tid, sid );
1725 break;
1726 }
1727 case VG_USERREQ__STACK_DEREGISTER: {
1728 VG_(deregister_stack)(arg[1]);
1729 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1730 break;
1731 }
1732 case VG_USERREQ__STACK_CHANGE: {
1733 VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
1734 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1735 break;
1736 }
1737 case VG_USERREQ__GET_MALLOCFUNCS: {
1738 struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];
1740 info->tl_malloc = VG_(tdict).tool_malloc;
1741 info->tl_calloc = VG_(tdict).tool_calloc;
1742 info->tl_realloc = VG_(tdict).tool_realloc;
1743 info->tl_memalign = VG_(tdict).tool_memalign;
1744 info->tl___builtin_new = VG_(tdict).tool___builtin_new;
1745 info->tl___builtin_vec_new = VG_(tdict).tool___builtin_vec_new;
1746 info->tl_free = VG_(tdict).tool_free;
1747 info->tl___builtin_delete = VG_(tdict).tool___builtin_delete;
1748 info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
1749 info->tl_malloc_usable_size = VG_(tdict).tool_malloc_usable_size;
1751 info->mallinfo = VG_(mallinfo);
1752 info->clo_trace_malloc = VG_(clo_trace_malloc);
1754 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1755 break;
1756 }
1759 /* Requests from the client program */
1761 case VG_USERREQ__DISCARD_TRANSLATIONS:
1762 if (VG_(clo_verbosity) > 2)
1763 VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
1764 " addr %p, len %lu\n",
1765 (void*)arg[1], arg[2] );
1767 VG_(discard_translations)(
1768 arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
1771 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1772 break;
1774 case VG_USERREQ__COUNT_ERRORS:
1775 SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
1776 break;
1778 case VG_USERREQ__LOAD_PDB_DEBUGINFO:
1779 VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
1780 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1781 break;
1783 case VG_USERREQ__MAP_IP_TO_SRCLOC: {
1784 Addr ip = arg[1];
1785 UChar* buf64 = (UChar*)arg[2];
1787 VG_(memset)(buf64, 0, 64);
1788 UInt linenum = 0;
1789 Bool ok = VG_(get_filename_linenum)(
1790 ip, &buf64[0], 50, NULL, 0, NULL, &linenum
1791 );
1792 if (ok) {
1793 /* Find the terminating zero in the first 50 bytes. */
1794 UInt i;
1795 for (i = 0; i < 50; i++) {
1796 if (buf64[i] == 0)
1797 break;
1798 }
1799 /* We must find a zero somewhere in 0 .. 49. Else
1800 VG_(get_filename_linenum) is not properly zero
1801 terminating. */
1802 vg_assert(i < 50);
1803 VG_(sprintf)(&buf64[i], ":%u", linenum);
1804 } else {
1805 buf64[0] = 0;
1806 }
1808 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1809 break;
1810 }
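/* Client-side usage sketch (an assumption-labelled example, using the
   generic request macro from valgrind.h): per the protocol above, the
   64-byte buffer receives the filename in its first 50 bytes followed
   by ":lineno", or becomes empty if the address is unknown.
   some_code_address is a hypothetical placeholder. */
#if 0
#include <valgrind/valgrind.h>
char buf64[64];
unsigned dummy;
VALGRIND_DO_CLIENT_REQUEST(dummy, 0, VG_USERREQ__MAP_IP_TO_SRCLOC,
                           some_code_address, buf64, 0, 0, 0);
/* buf64 now holds e.g. "foo.c:123". */
#endif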
1812 case VG_USERREQ__MALLOCLIKE_BLOCK:
1813 case VG_USERREQ__RESIZEINPLACE_BLOCK:
1814 case VG_USERREQ__FREELIKE_BLOCK:
1815 // Ignore them if the addr is NULL; otherwise pass onto the tool.
1816 if (!arg[1]) {
1817 SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
1818 break;
1819 } else {
1820 goto my_default;
1821 }
1823 default:
1824 my_default:
1825 if (os_client_request(tid, arg)) {
1826 // do nothing, os_client_request() handled it
1827 } else if (VG_(needs).client_requests) {
1828 UWord ret;
1830 if (VG_(clo_verbosity) > 2)
1831 VG_(printf)("client request: code %lx, addr %p, len %lu\n",
1832 arg[0], (void*)arg[1], arg[2] );
1834 if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
1835 SET_CLREQ_RETVAL(tid, ret);
1836 } else {
1837 static Bool whined = False;
1839 if (!whined && VG_(clo_verbosity) > 2) {
1840 // Allow for requests in core, but defined by tools, which
1841 // have 0 and 0 in their two high bytes.
1842 Char c1 = (arg[0] >> 24) & 0xff;
1843 Char c2 = (arg[0] >> 16) & 0xff;
1844 if (c1 == 0) c1 = '_';
1845 if (c2 == 0) c2 = '_';
1846 VG_(message)(Vg_UserMsg, "Warning:\n"
1847 " unhandled client request: 0x%lx (%c%c+0x%lx). Perhaps\n"
1848 " VG_(needs).client_requests should be set?\n",
1849 arg[0], c1, c2, arg[0] & 0xffff);
1850 whined = True;
1851 }
1852 }
1853 break;
1854 }
1855 return;
1858 va_list_casting_error_NORETURN:
1860 "Valgrind: fatal error - cannot continue: use of the deprecated\n"
1861 "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
1862 "on a platform where they cannot be supported. Please use the\n"
1863 "equivalent _VALIST_BY_REF versions instead.\n"
1865 "This is a binary-incompatible change in Valgrind's client request\n"
1866 "mechanism. It is unfortunate, but difficult to avoid. End-users\n"
1867 "are expected to almost never see this message. The only case in\n"
1868 "which you might see this message is if your code uses the macros\n"
1869 "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE. If so, you will need\n"
1870 "to recompile such code, using the header files from this version of\n"
1871 "Valgrind, and not any previous version.\n"
1873 "If you see this mesage in any other circumstances, it is probably\n"
1874 "a bug in Valgrind. In this case, please file a bug report at\n"
1876 " http://www.valgrind.org/support/bug_reports.html\n"
1884 /* ---------------------------------------------------------------------
1885 Sanity checking (permanently engaged)
1886 ------------------------------------------------------------------ */
1888 /* Internal consistency checks on the sched structures. */
1889 static
1890 void scheduler_sanity ( ThreadId tid )
1891 {
1892 Bool bad = False;
1893 static UInt lasttime = 0;
1894 UInt now;
1895 Int lwpid = VG_(gettid)();
1897 if (!VG_(is_running_thread)(tid)) {
1898 VG_(message)(Vg_DebugMsg,
1899 "Thread %d is supposed to be running, "
1900 "but doesn't own the_BigLock (owned by %d)\n",
1901 tid, VG_(running_tid));
1902 bad = True;
1903 }
1905 if (lwpid != VG_(threads)[tid].os_state.lwpid) {
1906 VG_(message)(Vg_DebugMsg,
1907 "Thread %d supposed to be in LWP %d, but we're actually %d\n",
1908 tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
1909 bad = True;
1910 }
1912 #if !defined(VGO_darwin)
1914 if (lwpid != the_BigLock.owner_lwpid) {
1915 VG_(message)(Vg_DebugMsg,
1916 "Thread (LWPID) %d doesn't own the_BigLock\n",
1922 /* Periodically show the state of all threads, for debugging
1923 purposes. */
1924 now = VG_(read_millisecond_timer)();
1925 if (0 && (!bad) && (lasttime + 4000/*ms*/ <= now)) {
1926 lasttime = now;
1927 VG_(printf)("\n------------ Sched State at %d ms ------------\n",
1929 VG_(show_sched_status)();
1930 }
1932 /* core_panic also shows the sched status, which is why we don't
1933 show it above if bad==True. */
1935 VG_(core_panic)("scheduler_sanity: failed");
1938 void VG_(sanity_check_general) ( Bool force_expensive )
1939 {
1940 ThreadId tid;
1942 static UInt next_slow_check_at = 1;
1943 static UInt slow_check_interval = 25;
1945 if (VG_(clo_sanity_level) < 1) return;
1947 /* --- First do all the tests that we can do quickly. ---*/
1949 sanity_fast_count++;
1951 /* Check stuff pertaining to the memory check system. */
1953 /* Check that nobody has spuriously claimed that the first or
1954 last 16 pages of memory have become accessible [...] */
1955 if (VG_(needs).sanity_checks) {
1956 vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
1957 }
1959 /* --- Now some more expensive checks. ---*/
1961 /* Once every now and again, check some more expensive stuff.
1962 Gradually increase the interval between such checks so as not to
1963 burden long-running programs too much. */
1964 if ( force_expensive
1965 || VG_(clo_sanity_level) > 1
1966 || (VG_(clo_sanity_level) == 1
1967 && sanity_fast_count == next_slow_check_at)) {
1969 if (0) VG_(printf)("SLOW at %d\n", sanity_fast_count-1);
1971 next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
1972 slow_check_interval++;
1973 sanity_slow_count++;
1975 if (VG_(needs).sanity_checks) {
1976 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
1977 }
1979 /* Look for stack overruns. Visit all threads. */
1980 for (tid = 1; tid < VG_N_THREADS; tid++) {
1984 if (VG_(threads)[tid].status == VgTs_Empty ||
1985 VG_(threads)[tid].status == VgTs_Zombie)
1986 continue;
1988 Addr stack
1990 = VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
1991 SizeT limit
1992 = 4096; // Let's say. Checking more causes lots of L2 misses.
1993 SizeT remains
1994 = VG_(am_get_VgStack_unused_szB)(stack, limit);
1995 if (remains < limit)
1996 VG_(message)(Vg_DebugMsg,
1997 "WARNING: Thread %d is within %ld bytes "
1998 "of running out of stack!\n",
2003 if (VG_(clo_sanity_level) > 1) {
2004 /* Check sanity of the low-level memory manager. Note that bugs
2005 in the client's code can cause this to fail, so we don't do
2006 this check unless specially asked for. And because it's
2007 potentially very expensive. */
2008 VG_(sanity_check_malloc_all)();
2009 }
2010 }
2012 /*--------------------------------------------------------------------*/
2013 /*--- end ---*/
2014 /*--------------------------------------------------------------------*/