/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2010 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
/*
   Signal handling.

   There are 4 distinct classes of signal:
   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals as a result of an instruction fault.  If
   we get one while running client code, then we just do the
   appropriate thing.  If it happens while running Valgrind code, then
   it indicates a Valgrind bug.  Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.
   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process.  Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind.  This means that we're also
   open to receiving those signals from other processes, sent with
   kill.  We could get away with just dropping them, since they aren't
   really signals that processes send to each other.
   3. Synchronous, general signals.  If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: ie, the signal
   will have been delivered by the time the syscall finishes.
   4. Asynchronous, general signals.  All other signals, sent by
   another process with kill.  These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.
   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads.  When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.
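
   In outline, that termination protocol is roughly this (a sketch
   only; the real code lives in m_scheduler and m_syswrap):

      VG_(threads)[tid].exitreason = VgSrc_ExitThread;
      if (VG_(threads)[tid].status == VgTs_WaitSys)
         VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);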
   We use a kernel thread for each application thread.  When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to.  This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).

   Some more details...
   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread).  This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back.
   However, we set the real signal state in the kernel to something
   entirely different.  This is recorded in SKSS, the static Kernel
   signal state.  What's nice (to the extent that anything is nice
   w.r.t. signals) is that there's a pure function to calculate SKSS
   from SCSS, calculate_SKSS_from_SCSS.  So when the client changes
   SCSS then we recompute the associated SKSS and apply any changes
   from the previous SKSS through to the kernel.
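
   In outline the update path looks like this (a sketch of the
   pattern; the real code is handle_SCSS_change() below):

      SKSS skss_old = skss;
      calculate_SKSS_from_SCSS(&skss);      /* pure function of SCSS */
      for (sig = 1; sig <= VG_(max_signal); sig++)
         if (skss_per_sig[sig] differs from skss_old's entry)
            VG_(sigaction)(sig, ...);       /* push change to kernel */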
   Now, that said, the general scheme we now have is that, regardless
   of what the client puts into the SCSS (viz, asks for), what we
   would like to do is as follows:
   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
       control is back with the scheduler), ask the kernel "do you have
       any signals for me?"  and if it does, collect up the info, and
       deliver them to the client (by building sigframes).

   And that's almost what we do.  The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work.  (of which more later).
   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.
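
   Scheduler-side, the usage is approximately this (a sketch; the real
   loop in m_scheduler has much more going on):

      while (thread is runnable) {
         VG_(poll_signals)(tid);    /* deliver any pending async sigs */
         /* ... run the thread on the VCPU for a while ... */
      }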
   Now unfortunately .. the above scheme only works for so-called async
   signals.  An async signal is one which isn't associated with any
   particular instruction, eg Control-C (SIGINT).  For those, it doesn't
   matter if we don't deliver the signal to the client immediately; it
   only matters that we deliver it eventually.  Hence polling is OK.
   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE.  And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.
   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for
       async signals.

   (1) and (2) together imply that if the host does deliver a signal
   to async_signalhandler while the VCPU is running, something's
   seriously wrong.
   (3) when running code on vcpu, don't block sync signals.  Instead
       register sync_signalhandler and catch any such via that.  Of
       course, that means an ugly recovery path if we do -- the
       sync_signalhandler has to longjmp, exiting out of the generated
       code, and the assembly-dispatcher thingy that runs it, and gets
       caught in m_scheduler, which then tells m_signals to deliver the
       signal.
   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.
   Obviously from the above, SKSS's signal mask (viz, what we really
   run with) is way different from SCSS's signal mask (viz, what the
   client thread thought it asked for).  For example, it may well be
   that the client did not block control-C, so that it just expects to
   drop dead if it receives ^C whilst blocked in a syscall, but by
   default we are running with all async signals blocked, and so that
   signal could be arbitrarily delayed, or perhaps even lost (not
   sure).
   So what we have to do, when doing any syscall which SfMayBlock, is
   to quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition.  This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S
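
   In pseudo-C, what that assembly does is approximately (a sketch;
   the real thing is done in assembly precisely so the kernel-visible
   window is exactly delimited):

      VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, &saved);
                                             /* client mask on  */
      /* ... do the blocking syscall ... */
      VG_(sigprocmask)(VKI_SIG_SETMASK, &saved, NULL);
                                             /* V's mask back   */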
   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever.  These are caught
                  by sync_signalhandler

    async signals:

       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_sighandler, since we
             temporarily unblocked them for the duration of the syscall,
             by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a
             translation, washing our hair, etc).  As in the normal case,
             these signals are blocked, but we can and do poll for them
             using VG_(poll_signals).
   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero).  This is trivial on Linux, since it's just a
   syscall.  But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, longwinded and probably error-prone way.
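
   On Linux the whole poll boils down to rt_sigtimedwait with a zero
   timeout, roughly (sketch):

      struct vki_timespec zero = { 0, 0 };
      sys_rt_sigtimedwait(&pollset, &info, &zero, sizeof(pollset));

   which returns immediately, with the siginfo filled in if a signal
   in 'pollset' was pending.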
 */

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debugger.h"      // For VG_(start_debugger)
#include "pub_core_errormgr.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"
/* ---------------------------------------------------------------------
   Forwards decls.
   ------------------------------------------------------------------ */
static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

static const Char *signame(Int sigNo);
/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS        8
typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;

/* ------ Macros for pulling stuff out of ucontexts ------ */
/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do?  A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall.  (Or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.)  Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc.  Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */
#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);           \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);           \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;          \
      }
#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;                    \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;                    \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp;        \
      }
#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20.  The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9.  I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct?  And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext.  The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs  *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext.  In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used an altivec instruction.  If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it.  That bit
   will always be clear under 2.4.20.  So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */
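/* For example, such a validity check would look like this (a sketch;
   0x02000000 is the MSR_VEC bit):

      Bool vregs_valid
         = (uc->uc_regs->mc_gregs[VKI_PT_MSR] & 0x02000000) != 0;
*/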
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */  \
      VG_(mk_SysRes_ppc32_linux)(                                   \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                        \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)          \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                       \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);     \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);      \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK];   \
      }
#elif defined(VGP_ppc64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3.  memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM.  Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
   {
      ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
      ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
      if (err) r3 &= 0xFF;
      return VG_(mk_SysRes_ppc64_linux)( r3, err );
   }
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                        \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];             \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];              \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK];  \
      }
#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)       \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;         \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;         \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
      }
#elif defined(VGP_ppc32_aix5)

   /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
#  include <ucontext.h>
   /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return jc->iar;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return jc->gpr[1];
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return VG_(mk_SysRes_ppc32_aix5)( jc->gpr[3], jc->gpr[4] );
   }
   static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return jc->lr;
   }
   static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
      return VG_UCONTEXT_STACK_PTR(ucV);
   }
#elif defined(VGP_ppc64_aix5)

   /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
#  include <ucontext.h>
   /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return jc->iar;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return jc->gpr[1];
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return VG_(mk_SysRes_ppc64_aix5)( jc->gpr[3], jc->gpr[4] );
   }
   static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return jc->lr;
   }
   static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
      return VG_UCONTEXT_STACK_PTR(ucV);
   }
#elif defined(VGP_x86_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__eip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__esp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* this is complicated by the problem that there are 3 different
         kinds of syscalls, each with its own return convention.
         NB: scclass is a host word, hence UWord is good for both
         amd64-darwin and x86-darwin */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      UInt carry = 1 & ss->__eflags;
      UInt err = 0;
      UInt wLO = 0;
      UInt wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__eax;
            wHI = ss->__edx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__eax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__eax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                        wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)(ucV);
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__eip);
      srP->r_sp = (ULong)(ss->__esp);
      srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
   }
#elif defined(VGP_amd64_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rsp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* as for x86-darwin, but with the 64-bit register names */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      ULong carry = 1 & ss->__rflags;
      ULong err = 0;
      ULong wLO = 0;
      ULong wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__rax;
            wHI = ss->__rdx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__rax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__rax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
                                          wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__rip);
      srP->r_sp = (ULong)(ss->__rsp);
      srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
   }
#elif defined(VGO_l4re)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.ebp)
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.eax)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_LINK_REG(uc)        0 /* Dude, where's my LR? */

#else
#  error Unknown platform
#endif
/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and AIX vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_aix5)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#elif defined(VGO_darwin)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#elif defined(VGO_l4re)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#else
#  error Unknown OS
#endif
/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */
/* Base-ment of these arrays[_VKI_NSIG].

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
*/
/* -----------------------------------------------------
   Static client signal state (SCSS).  This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings.  These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested.  Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;
/* -----------------------------------------------------
   Static kernel signal state (SKSS).  This is the state
   that we have the kernel in.  It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- pass through
     SA_RESTART -- we observe this but set our handlers to always restart
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
     SA_ONSTACK -- pass through
     SA_NOCLDWAIT -- pass through
*/

typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;
static Bool is_sig_ign(Int sigNo)
{
   vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);

   return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN;
}
/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );
   }
}
/* This is the core, clever bit.  Computation is as follows:

   For each signal
      handler = if client has a handler, then our handler
                else if client is DFL, then our handler as well
                else (client must be IGN)
                   then handler is IGN
*/
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      skss_handler = VKI_SIG_DFL;
      skss_flags   = 0;

      switch(sig) {
      case VKI_SIGSEGV:
      case VKI_SIGBUS:
      case VKI_SIGFPE:
      case VKI_SIGILL:
      case VKI_SIGTRAP:
         /* For these, we always want to catch them and report, even
            if the client code doesn't. */
         skss_handler = sync_signalhandler;
         break;

      case VKI_SIGCONT:
         /* Let the kernel handle SIGCONT unless the client is actually
            catching it. */
      case VKI_SIGCHLD:
      case VKI_SIGWINCH:
      case VKI_SIGURG:
         /* For signals which have a default action of Ignore,
            only set a handler if the client has set a signal handler.
            Otherwise the kernel will interrupt a syscall which
            wouldn't have otherwise been interrupted. */
         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
            skss_handler = VKI_SIG_DFL;
         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
            skss_handler = VKI_SIG_IGN;
         else
            skss_handler = async_signalhandler;
         break;

      default:
         // VKI_SIGVG* are runtime variables, so we can't make them
         // cases in the switch, so we handle them in the 'default' case.
         if (sig == VG_SIGVGKILL)
            skss_handler = sigvgkill_handler;
         else {
            if (scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
         }
         break;
      }

      /* Flags */

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags   = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}
/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
extern void my_sigreturn(void);
#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "    movl    $" #name ", %eax\n" \
   "    int     $0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "    movq    $" #name ", %rax\n" \
   "    syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "    li 0, " #name "\n" \
   "    sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64_linux)
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align   3\n" \
   "my_sigreturn:\n" \
   ".quad    .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type    .my_sigreturn,@function\n" \
   ".globl   .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "    li 0, " #name "\n" \
   "    sc\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n\t" \
   "    mov  r7, #" #name "\n\t" \
   "    svc  0x00000000\n" \
   ".previous\n"

#elif defined(VGP_ppc32_aix5)
#  define _MY_SIGRETURN(name) \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   ".long 0\n"

#elif defined(VGP_ppc64_aix5)
#  define _MY_SIGRETURN(name) \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   ".long 0\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "int $0x80"

#elif defined(VGP_amd64_darwin)
   // DDD: todo
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGO_l4re)
#  define _MY_SIGRETURN(name) \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);
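
/* For example (sketch): on x86-linux, where __NR_rt_sigreturn is 173,
   MY_SIGRETURN(__NR_rt_sigreturn) first expands its argument to 173,
   and then _MY_SIGRETURN(173) stringifies it via #name, yielding
   " movl $173, %eax ...".  Using _MY_SIGRETURN directly would
   stringify to "__NR_rt_sigreturn", which the assembler would not
   understand. */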
static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL is pointless; just ignore
         it. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update
          && (skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
          && (skss_old.skss_per_sig[sig].skss_flags
              == skss.skss_per_sig[sig].skss_flags))
         /* no difference */
         continue;

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
        !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
        !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc.  Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
           !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
           !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
         vg_assert(ksa_old.sa_restorer == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}
/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack. */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}
SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %d, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                (void*)(ss ? ss->ss_sp : 0),
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}
SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions.  The aim is to ensure that
      when the call is passed to the kernel it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
        && new_act
        && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entries into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
        !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags   = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask    = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
        !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                signame(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                signame(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                signame(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                signame(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}
static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}
static
HChar* format_sigset ( const vki_sigset_t* set )
{
   static HChar buf[128];
   int w;

   VG_(strcpy)(buf, "");

   for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
   {
#     if _VKI_NSIG_BPW == 32
      VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
                   set ? (ULong)set->sig[w] : 0);
#     elif _VKI_NSIG_BPW == 64
      VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
                   set ? (ULong)set->sig[w] : 0);
#     else
#       error "Unsupported value for _VKI_NSIG_BPW"
#     endif
   }

   return buf;
}
/*
   This updates the thread's signal mask.  There's no such thing as a
   process-wide signal mask.

   Note that the thread signal masks are an implicit part of SCSS,
   which is why this routine is allowed to mess with them.
*/
static
void do_setmask ( ThreadId tid,
                  Int how,
                  vki_sigset_t* newset,
                  vki_sigset_t* oldset )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
                tid, how,
                how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
                   how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
                      how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
                newset, newset ? format_sigset(newset) : "NULL" );

   /* Just do this thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   if (oldset) {
      *oldset = VG_(threads)[tid].sig_mask;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   }
   if (newset) {
      do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
      VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   }
}
SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
                                 Int how,
                                 vki_sigset_t* set,
                                 vki_sigset_t* oldset )
{
   switch(how) {
      case VKI_SIG_BLOCK:
      case VKI_SIG_UNBLOCK:
      case VKI_SIG_SETMASK:
         vg_assert(VG_(is_valid_tid)(tid));
         do_setmask ( tid, how, set, oldset );
         return VG_(mk_SysRes_Success)( 0 );

      default:
         VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
}
/* ---------------------------------------------------------------------
   LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */
/* Block all host signals, dumping the old mask in *saved_mask. */
static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   Int ret;
   vki_sigset_t block_procmask;
   VG_(sigfillset)(&block_procmask);
   ret = VG_(sigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
#endif
}

/* Restore the blocking mask using the supplied saved one. */
static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
{
   Int ret;
   ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}
void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   block_all_host_signals(saved_mask);
   if (VG_(threads)[tid].sig_queue != NULL) {
      VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
      VG_(threads)[tid].sig_queue = NULL;
   }
   restore_all_host_signals(saved_mask);
#endif
}
/* ---------------------------------------------------------------------
   The signal simulation proper.  A simplified version of what the
   Linux kernel does.
   ------------------------------------------------------------------ */
/* Set up a stack frame (VgSigContext) for the client's signal
   handler. */
static
void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                         const struct vki_ucontext *uc )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   Addr         esp_top_of_frame;
   ThreadState* tst;
   Int          sigNo = siginfo->si_signo;

   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
      VG_(get_and_pp_StackTrace)(tid, 10);
   }

   if (/* this signal asked to run on an alt stack */
       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
       && /* there is a defined and enabled alt stack, which we're not
             already using.  Logic from get_sigframe in
             arch/i386/kernel/signal.c. */
          sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
      ) {
      esp_top_of_frame
         = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("delivering signal %d (%s) to thread %d: "
                   "on ALT STACK (%p-%p; %ld bytes)\n",
                   sigNo, signame(sigNo), tid, tst->altstack.ss_sp,
                   (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                   (Word)tst->altstack.ss_size );

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );

   } else {
      esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
   }

   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);

   /* This may fail if the client stack is busted; if that happens,
      the whole process will exit rather than simply calling the
      signal handler. */
   VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
                         scss.scss_per_sig[sigNo].scss_handler,
                         scss.scss_per_sig[sigNo].scss_flags,
                         &tst->sig_mask,
                         scss.scss_per_sig[sigNo].scss_restorer);
#endif
}
static const Char *signame(Int sigNo)
{
   static Char buf[20];

   switch(sigNo) {
      case VKI_SIGHUP:    return "SIGHUP";
      case VKI_SIGINT:    return "SIGINT";
      case VKI_SIGQUIT:   return "SIGQUIT";
      case VKI_SIGILL:    return "SIGILL";
      case VKI_SIGTRAP:   return "SIGTRAP";
      case VKI_SIGABRT:   return "SIGABRT";
      case VKI_SIGBUS:    return "SIGBUS";
      case VKI_SIGFPE:    return "SIGFPE";
      case VKI_SIGKILL:   return "SIGKILL";
      case VKI_SIGUSR1:   return "SIGUSR1";
      case VKI_SIGUSR2:   return "SIGUSR2";
      case VKI_SIGSEGV:   return "SIGSEGV";
      case VKI_SIGPIPE:   return "SIGPIPE";
      case VKI_SIGALRM:   return "SIGALRM";
      case VKI_SIGTERM:   return "SIGTERM";
#     if defined(VKI_SIGSTKFLT)
      case VKI_SIGSTKFLT: return "SIGSTKFLT";
#     endif
      case VKI_SIGCHLD:   return "SIGCHLD";
      case VKI_SIGCONT:   return "SIGCONT";
      case VKI_SIGSTOP:   return "SIGSTOP";
      case VKI_SIGTSTP:   return "SIGTSTP";
      case VKI_SIGTTIN:   return "SIGTTIN";
      case VKI_SIGTTOU:   return "SIGTTOU";
      case VKI_SIGURG:    return "SIGURG";
      case VKI_SIGXCPU:   return "SIGXCPU";
      case VKI_SIGXFSZ:   return "SIGXFSZ";
      case VKI_SIGVTALRM: return "SIGVTALRM";
      case VKI_SIGPROF:   return "SIGPROF";
      case VKI_SIGWINCH:  return "SIGWINCH";
      case VKI_SIGIO:     return "SIGIO";
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    return "SIGPWR";
#     endif
#     if defined(VKI_SIGUNUSED)
      case VKI_SIGUNUSED: return "SIGUNUSED";
#     endif

#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX:
         VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
         return buf;
#     endif

      default:
         VG_(sprintf)(buf, "SIG%d", sigNo);
         return buf;
   }
}
/* Hit ourselves with a signal using the default handler */
void VG_(kill_self)(Int sigNo)
{
   Int r;
   vki_sigset_t          mask, origmask;
   vki_sigaction_toK_t   sa, origsa2;
   vki_sigaction_fromK_t origsa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
     !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(sigaction)(sigNo, &sa, &origsa);

   VG_(sigemptyset)(&mask);
   VG_(sigaddset)(&mask, sigNo);
   VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);

   r = VG_(kill)(VG_(getpid)(), sigNo);
   /* This sometimes fails with EPERM on Darwin.  I don't know why. */
   /* vg_assert(r == 0); */

   VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   VG_(sigaction)(sigNo, &origsa2, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
}
// The si_code describes where the signal came from.  Some come from the
// kernel, eg.: seg faults, illegal opcodes.  Some come from the user, eg.:
// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
// request (SI_ASYNCIO).  There's lots of implementation-defined leeway in
// POSIX, but the user vs. kernel distinction is what we want here.  We also
// pass in some other details that can help when si_code is unreliable.
static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
{
#if defined(VGO_linux) || defined(VGO_aix5)
   // On Linux, SI_USER is zero, negative values are from the user, positive
   // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
   // macros but we don't use them here because other platforms don't have
   // them.
   return ( si_code > VKI_SI_USER ? True : False );
#elif defined(VGO_darwin)
   // On Darwin 9.6.0, the si_code is completely unreliable.  It should be the
   // case that 0 means "user", and >0 means "kernel".  But:
   // - For SIGSEGV, it seems quite reliable.
   // - For SIGBUS, it's always 2.
   // - For SIGFPE, it's often 0, even for kernel ones (eg.
   //   div-by-integer-zero always gives zero).
   // - For SIGILL, it's unclear.
   // - For SIGTRAP, it's always 1.
   // You can see the "NOTIMP" (not implemented) status of a number of the
   // sub-cases in sys/signal.h.  Hopefully future versions of Darwin will
   // get this right.

   // If we're blocked waiting on a syscall, it must be a user signal, because
   // the kernel won't generate sync signals within syscalls.
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      return False;

   // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
   } else if (VKI_SIGSEGV == signum) {
      return ( si_code > 0 ? True : False );

   // If it's anything else, assume it's kernel-generated.  Reason being that
   // kernel-generated sync signals are more common, and it's probable that
   // misdiagnosing a user signal as a kernel signal is better than the
   // opposite.
   } else {
      return True;
   }
#elif defined(VGO_l4re)
   return ( si_code > VKI_SI_USER ? True : False );
#else
#  error Unknown OS
#endif
}
// This is an arbitrary si_code that we only use internally.  It corresponds
// to the value SI_KERNEL on Linux, but that's not really of any significance
// as far as I can determine.
#define VKI_SEGV_MADE_UP_GPF    0x80
/*
   Perform the default action of a signal.  If the signal is fatal, it
   marks all threads as needing to exit, but it doesn't actually kill
   the process or thread.

   If we're not being quiet, then print out some more detail about
   fatal signals (esp. core dumping signals).
*/
static void default_action(const vki_siginfo_t *info, ThreadId tid)
{
   Int  sigNo     = info->si_signo;
   Bool terminate = False;      /* kills process         */
   Bool core      = False;      /* kills process w/ core */
   struct vki_rlimit corelim;
   Bool could_core;

   vg_assert(VG_(is_running_thread)(tid));

   switch(sigNo) {
      case VKI_SIGQUIT:  /* core */
      case VKI_SIGILL:   /* core */
      case VKI_SIGABRT:  /* core */
      case VKI_SIGFPE:   /* core */
      case VKI_SIGSEGV:  /* core */
      case VKI_SIGBUS:   /* core */
      case VKI_SIGTRAP:  /* core */
      case VKI_SIGXCPU:  /* core */
      case VKI_SIGXFSZ:  /* core */
         terminate = True;
         core = True;
         break;

      case VKI_SIGHUP:   /* term */
      case VKI_SIGINT:   /* term */
      case VKI_SIGKILL:  /* term - we won't see this */
      case VKI_SIGPIPE:  /* term */
      case VKI_SIGALRM:  /* term */
      case VKI_SIGTERM:  /* term */
      case VKI_SIGUSR1:  /* term */
      case VKI_SIGUSR2:  /* term */
      case VKI_SIGIO:    /* term */
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:   /* term */
#     endif
      case VKI_SIGSYS:   /* term */
      case VKI_SIGPROF:  /* term */
      case VKI_SIGVTALRM: /* term */
#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
#     endif
         terminate = True;
         break;
   }

   vg_assert(!core || (core && terminate));
   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
                sigNo, info->si_code, terminate ? "terminate" : "ignore",
                core ? "+core" : "");

   if (!terminate)
      return;   /* nothing to do */

   could_core = core;

   if (core) {
      /* If they set the core-size limit to zero, don't generate a
         core file */

      VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);

      if (corelim.rlim_cur == 0)
         core = False;
   }

   if ( (VG_(clo_verbosity) > 1 ||
         (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
        ) &&
        !VG_(clo_xml) ) {
      VG_(umsg)(
         "\n"
         "Process terminating with default action of signal %d (%s)%s\n",
         sigNo, signame(sigNo), core ? ": dumping core" : "");
      /* Be helpful - decode some more details about this fault */
      if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
         const Char *event = NULL;
         Bool haveaddr = True;

         switch(sigNo) {
         case VKI_SIGSEGV:
            switch(info->si_code) {
            case VKI_SEGV_MAPERR: event = "Access not within mapped region";
                                  break;
            case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
                                  break;
            case VKI_SEGV_MADE_UP_GPF:
               /* General Protection Fault: The CPU/kernel
                  isn't telling us anything useful, but this
                  is commonly the result of exceeding a
                  segment limit. */
               event = "General Protection Fault";
               haveaddr = False;
               break;
            }
#if 0
            {
               HChar buf[110];
               VG_(am_show_nsegments)(0,"post segfault");
               VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
               VG_(system)(buf);
            }
#endif
            break;
         case VKI_SIGILL:
            switch(info->si_code) {
            case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
            case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
            case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
            case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
            case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
            case VKI_ILL_PRVREG: event = "Privileged register"; break;
            case VKI_ILL_COPROC: event = "Coprocessor error"; break;
            case VKI_ILL_BADSTK: event = "Internal stack error"; break;
            }
            break;

         case VKI_SIGFPE:
            switch (info->si_code) {
            case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
            case VKI_FPE_INTOVF: event = "Integer overflow"; break;
            case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
            case VKI_FPE_FLTOVF: event = "FP overflow"; break;
            case VKI_FPE_FLTUND: event = "FP underflow"; break;
            case VKI_FPE_FLTRES: event = "FP inexact"; break;
            case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
            case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
            }
            break;

         case VKI_SIGBUS:
            switch (info->si_code) {
            case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
            case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
            case VKI_BUS_OBJERR: event = "Hardware error"; break;
            }
            break;
         } /* switch (sigNo) */
         if (event != NULL) {
            if (haveaddr)
               VG_(umsg)(" %s at address %p\n",
                         event, info->VKI_SIGINFO_si_addr);
            else
               VG_(umsg)(" %s\n", event);
         }
      }
      /* Print a stack trace.  Be cautious if the thread's SP is in an
         obviously stupid place (not mapped readable) that would
         likely cause a segfault. */
      if (VG_(is_valid_tid)(tid)) {
         ExeContext* ec = VG_(am_is_valid_for_client)
                             (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
                        ? VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ )
                        : VG_(record_depth_1_ExeContext)( tid );
         vg_assert(ec);
         VG_(pp_ExeContext)( ec );
      }
      if (sigNo == VKI_SIGSEGV
          && info && is_signal_from_kernel(tid, sigNo, info->si_code)
          && info->si_code == VKI_SEGV_MAPERR) {
         VG_(umsg)(" If you believe this happened as a result of a stack\n" );
         VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
         VG_(umsg)(" possible), you can try to increase the size of the\n" );
         VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
         // FIXME: assumes main ThreadId == 1
         if (VG_(is_valid_tid)(1)) {
            VG_(umsg)(
               " The main thread stack size used in this run was %d.\n",
               (Int)VG_(threads)[1].client_stack_szB);
         }
      }
   }
   if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
      VG_(start_debugger)( tid );
   }

   if (core) {
      static const struct vki_rlimit zero = { 0, 0 };

      VG_(make_coredump)(tid, info, corelim.rlim_cur);

      /* Make sure we don't get a confusing kernel-generated
         coredump when we finally exit */
      VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
   }

   /* stash fatal signal in main thread */
   //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;

   /* everyone dies */
   VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
   VG_(threads)[tid].exitreason = VgSrc_FatalSig;
   VG_(threads)[tid].os_state.fatalsig = sigNo;
}
/*
   This does the business of delivering a signal to a thread.  It may
   be called from either a real signal handler, or from normal code to
   cause the thread to enter the signal handler.

   This updates the thread state, but it does not set it to be
   Runnable.
*/
static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
                             const struct vki_ucontext *uc )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   Int              sigNo    = info->si_signo;
   SCSS_Per_Signal  *handler = &scss.scss_per_sig[sigNo];
   void             *handler_fn;
   ThreadState      *tst     = VG_(get_ThreadState)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
                sigNo, signame(sigNo), info->si_code, tid );

   if (sigNo == VG_SIGVGKILL) {
      /* If this is a SIGVGKILL, we're expecting it to interrupt any
         blocked syscall.  It doesn't matter whether the VCPU state is
         set to restart or not, because we don't expect it will
         execute any more client instructions. */
      vg_assert(VG_(is_exiting)(tid));
      return;
   }
   /* If the client specifies SIG_IGN, treat it as SIG_DFL.

      If deliver_signal() is being called on a thread, we want
      the signal to get through no matter what; if they're ignoring
      it, then we do this override (this is so we can send it SIGSEGV,
      etc). */
   handler_fn = handler->scss_handler;
   if (handler_fn == VKI_SIG_IGN)
      handler_fn = VKI_SIG_DFL;

   vg_assert(handler_fn != VKI_SIG_IGN);

   if (handler_fn == VKI_SIG_DFL) {
      default_action(info, tid);
   } else {
      /* Create a signal delivery frame, and set the client's %ESP and
         %EIP so that when execution continues, we will enter the
         signal handler with the frame on top of the client's stack,
         as it expects.

         Signal delivery can fail if the client stack is too small or
         missing, and we can't push the frame.  If that happens,
         push_signal_frame will cause the whole process to exit when
         we next hit the scheduler.
      */
      vg_assert(VG_(is_valid_tid)(tid));

      push_signal_frame ( tid, info, uc );

      if (handler->scss_flags & VKI_SA_ONESHOT) {
         /* Do the ONESHOT thing. */
         handler->scss_handler = VKI_SIG_DFL;

         handle_SCSS_change( False /* lazy update */ );
      }
      /*
         tst->sig_mask is the current signal mask
         tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
         handler->scss_mask is the mask set by the handler

         Handler gets a mask of tmp_sig_mask|handler_mask|signo
       */
      tst->sig_mask = tst->tmp_sig_mask;
      if (!(handler->scss_flags & VKI_SA_NOMASK)) {
         VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
         VG_(sigaddset)(&tst->sig_mask, sigNo);
         tst->tmp_sig_mask = tst->sig_mask;
      }
   }

   /* Thread state is ready to go - just add Runnable */
#endif
}
static void resume_scheduler(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->os_state.lwpid == VG_(gettid)());

   if (tst->sched_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      __builtin_longjmp(tst->sched_jmpbuf, True);
   }
}
static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
{
#if defined(VGO_l4re)
   VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
   VG_(message)(Vg_DebugMsg, "%s(tid=%d, addr=%p, si_code=%d)\n",
                __func__, tid, addr, si_code);
   enter_kdebug("synth_fault_common");
#endif
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGSEGV;
   info.si_code  = si_code;
   info.VKI_SIGINFO_si_addr = (void*)addr;

   /* If they're trying to block the signal, force it to be delivered */
   if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
      VG_(set_default_handler)(VKI_SIGSEGV);

   deliver_signal(tid, &info, NULL);
}
// Synthesize a fault where the address is OK, but the page
// permissions are bad.
void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
{
   synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
}

// Synthesize a fault where there's nothing mapped at the address.
void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
{
   synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
}

// Synthesize a misc memory fault.
void VG_(synth_fault)(ThreadId tid)
{
   synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
}
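/* Illustrative use of the wrappers above: when the core decides that
   a client access must be seen to fault -- say a syscall wrapper
   notices an unaddressable buffer -- it can do

      VG_(synth_fault_mapping)(tid, addr);   // SEGV_MAPERR at addr

   The thread must be Runnable; delivery then goes through
   deliver_signal() exactly as for a real kernel-generated fault. */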
// Synthesise a SIGILL.
void VG_(synth_sigill)(ThreadId tid, Addr addr)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGILL;
   info.si_code  = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
   info.VKI_SIGINFO_si_addr = (void*)addr;

   resume_scheduler(tid);
   deliver_signal(tid, &info, NULL);
}
// Synthesise a SIGBUS.
void VG_(synth_sigbus)(ThreadId tid)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGBUS;
   /* There are several meanings to SIGBUS (as per POSIX, presumably),
      but the most widely understood is "invalid address alignment",
      so let's use that. */
   info.si_code = VKI_BUS_ADRALN;
   /* If we knew the invalid address in question, we could put it
      in .si_addr.  Oh well. */
   /* info.VKI_SIGINFO_si_addr = (void*)addr; */

   resume_scheduler(tid);
   deliver_signal(tid, &info, NULL);
}
// Synthesise a SIGTRAP.
void VG_(synth_sigtrap)(ThreadId tid)
{
   vki_siginfo_t info;
   struct vki_ucontext uc;
#  if defined(VGP_x86_darwin)
   struct __darwin_mcontext32 mc;
#  elif defined(VGP_amd64_darwin)
   struct __darwin_mcontext64 mc;
#  endif

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   VG_(memset)(&uc,   0, sizeof(uc));
   info.si_signo = VKI_SIGTRAP;
   info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */

#  if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
   uc.uc_mcontext.trapno = 3;     /* tjh: this is the x86 trap number
                                          for a breakpoint trap... */
   uc.uc_mcontext.err = 0;        /* tjh: no error code for x86
                                          breakpoint trap... */
#  elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
   /* the same thing, but using Darwin field/struct names */
   VG_(memset)(&mc, 0, sizeof(mc));
   uc.uc_mcontext = &mc;
   uc.uc_mcontext->__es.__trapno = 3;
   uc.uc_mcontext->__es.__err = 0;
#  endif

   resume_scheduler(tid);
   deliver_signal(tid, &info, &uc);
}
/* Make a signal pending for a thread, for later delivery.
   VG_(poll_signals) will arrange for it to be delivered at the right
   time.

   tid==0 means add it to the process-wide queue, and don't send it to
   a specific thread.
*/
static
void queue_signal(ThreadId tid, const vki_siginfo_t *si)
{
   ThreadState *tst;
   SigQueue    *sq;
   vki_sigset_t savedmask;

   tst = VG_(get_ThreadState)(tid);

   /* Protect the signal queue against async deliveries */
   block_all_host_signals(&savedmask);

   if (tst->sig_queue == NULL) {
      tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
                                         sizeof(*tst->sig_queue));
      VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
   }
   sq = tst->sig_queue;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
                si->si_signo, sq->next, tid);

   /* Add signal to the queue.  If the queue gets overrun, then old
      queued signals may get lost.

      XXX We should also keep a sigset of pending signals, so that at
      least a non-siginfo signal gets delivered.
   */
   if (sq->sigs[sq->next].si_signo != 0)
      VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
                sq->sigs[sq->next].si_signo, tid);

   sq->sigs[sq->next] = *si;
   sq->next = (sq->next+1) % N_QUEUED_SIGNALS;

   restore_all_host_signals(&savedmask);
}
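/* The queue is a small fixed-size ring: sq->next names both the slot
   for the next arrival and the oldest entry.  With N_QUEUED_SIGNALS
   slots (the constant is defined earlier in this file), queueing one
   more undelivered signal than that overwrites the oldest slot, which
   is what the "being dropped" warning above reports.  Slots are freed
   by setting si_signo back to 0. */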
/*
   Returns the next queued signal for thread tid which is in "set".
   tid==0 means process-wide signal.  Set si_signo to 0 when the
   signal has been delivered.

   Must be called with all signals blocked, to protect against async
   deliveries.
*/
static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   SigQueue    *sq;
   Int          idx;
   vki_siginfo_t *ret = NULL;

   sq = tst->sig_queue;
   if (sq == NULL)
      goto out;

   idx = sq->next;
   do {
      if (0)
         VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
                     sq->sigs[idx].si_signo,
                     VG_(sigismember)(set, sq->sigs[idx].si_signo));

      if (sq->sigs[idx].si_signo != 0
          && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
         if (VG_(clo_trace_signals))
            VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
                      sq->sigs[idx].si_signo, idx, tid);
         ret = &sq->sigs[idx];
         goto out;
      }

      idx = (idx + 1) % N_QUEUED_SIGNALS;
   } while(idx != sq->next);
  out:
   return ret;
}
static int sanitize_si_code(int si_code)
{
#if defined(VGO_linux) || defined(VGO_l4re)
   /* The linux kernel uses the top 16 bits of si_code for its own
      use and only exports the bottom 16 bits to user space - at least
      that is the theory, but it turns out that there are some kernels
      around that forget to mask out the top 16 bits so we do it here.

      The kernel treats the bottom 16 bits as signed and (when it does
      mask them off) sign extends them when exporting to user space so
      we do the same thing here. */
   return (Short)si_code;
#elif defined(VGO_aix5) || defined(VGO_darwin)
   return si_code;
#else
#  error Unknown OS
#endif
}
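/* Worked example: if a buggy kernel hands user space si_code
   0x00010006 (kernel-private bits left in the top half), the cast to
   Short keeps only 0x0006 and sign-extends, yielding 6 -- the value
   that should have been exported.  A bottom half of 0xfffe likewise
   comes back as -2 rather than 65534. */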
/*
   Receive an async signal from the kernel.

   This should only happen when the thread is blocked in a syscall,
   since that's the only time this set of signals is unblocked.
*/
static
void async_signalhandler ( Int sigNo,
                           vki_siginfo_t *info, struct vki_ucontext *uc )
{
#if !defined(VGO_l4re)
   ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadState* tst = VG_(get_ThreadState)(tid);
   SysRes       sres;

   /* The thread isn't currently running, make it so before going on */
   vg_assert(tst->status == VgTs_WaitSys);
   VG_(acquire_BigLock)(tid, "async_signalhandler");

   info->si_code = sanitize_si_code(info->si_code);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
                sigNo, tid, info->si_code);

   /* Update thread state properly.  The signal can only have been
      delivered whilst we were in
      coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
      window between the two sigprocmask calls, since at all other
      times, we run with async signals on the host blocked.  Hence
      make enquiries on the basis that we were in or very close to a
      syscall, and attempt to fix up the guest state accordingly.

      (normal async signals occurring during computation are blocked,
      but periodically polled for using VG_(sigtimedwait_zero), and
      delivered at a point convenient for us.  Hence this routine only
      deals with signals that are delivered to a thread during a
      syscall.) */

   /* First, extract a SysRes from the ucontext_t* given to this
      handler.  If it is subsequently established by
      VG_(fixup_guest_state_after_syscall_interrupted) that the
      syscall was complete but the results had not been committed yet
      to the guest state, then it'll have to commit the results itself
      "by hand", and so we need to extract the SysRes.  Of course if
      the thread was not in that particular window then the
      SysRes will be meaningless, but that's OK too because
      VG_(fixup_guest_state_after_syscall_interrupted) will detect
      that the thread was not in said window and ignore the SysRes. */

   /* To make matters more complex still, on Darwin we need to know
      the "class" of the syscall under consideration in order to be
      able to extract the correct SysRes.  The class will have been
      saved just before the syscall, by VG_(client_syscall), into this
      thread's tst->arch.vex.guest_SC_CLASS.  Hence: */
#  if defined(VGO_darwin)
   sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
#  else
   sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
#  endif

   /* (1) */
   VG_(fixup_guest_state_after_syscall_interrupted)(
      tid,
      VG_UCONTEXT_INSTR_PTR(uc),
      sres,
      !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
   );

   /* (2) */
   /* Set up the thread's state to deliver a signal */
   if (!is_sig_ign(info->si_signo))
      deliver_signal(tid, info, uc);

   /* It's crucial that (1) and (2) happen in the order (1) then (2)
      and not the other way around.  (1) fixes up the guest thread
      state to reflect the fact that the syscall was interrupted --
      either to restart the syscall or to return EINTR.  (2) then sets
      up the thread state to deliver the signal.  Then we resume
      execution.  First, the signal handler is run, since that's the
      second adjustment we made to the thread state.  If that returns,
      then we resume at the guest state created by (1), viz, either
      the syscall returns EINTR or is restarted.

      If (2) was done before (1) the outcome would be completely
      different, and wrong. */

   /* longjmp back to the thread's main loop to start executing the
      handler. */
   resume_scheduler(tid);

   VG_(core_panic)("async_signalhandler: got unexpected signal "
                   "while outside of scheduler");
#else
   VG_(unimplemented)("unimplemented function async_signalhandler()");
#endif
}
/* Extend the stack to cover addr.  maxsize is the limit the stack can grow to.

   Returns True on success, False on failure.

   Succeeds without doing anything if addr is already within a segment.

   Failure could be caused by:
   - addr not below a growable segment
   - new stack size would exceed maxsize
   - mmap failed for some other reason
*/
Bool VG_(extend_stack)(Addr addr, UInt maxsize)
{
   SizeT udelta;

   /* Find the next Segment above addr */
   NSegment const* seg
      = VG_(am_find_nsegment)(addr);
   NSegment const* seg_next
      = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
            : NULL;

   if (seg && seg->kind == SkAnonC)
      /* addr is already mapped.  Nothing to do. */
      return True;

   /* Check that the requested new base is in a shrink-down
      reservation section which abuts an anonymous mapping that
      belongs to the client. */
   if ( ! (seg
           && seg->kind == SkResvn
           && seg->smode == SmUpper
           && seg_next
           && seg_next->kind == SkAnonC
           && seg->end+1 == seg_next->start))
      return False;

   udelta = VG_PGROUNDUP(seg_next->start - addr);
   VG_(debugLog)(1, "signals",
                 "extending a stack base 0x%llx down by %lld\n",
                 (ULong)seg_next->start, (ULong)udelta);
   if (! VG_(am_extend_into_adjacent_reservation_client)
           ( (NSegment*)seg_next, -(SSizeT)udelta )) {
      VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
      return False;
   }

   /* When we change the main stack, we have to let the stack handling
      code know about it. */
   VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));

   if (VG_(clo_sanity_level) > 2)
      VG_(sanity_check_general)(False);

   return True;
}
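/* Worked example (illustrative addresses, 4K pages): if the client
   stack segment currently starts at 0x50000000 and addr is
   0x4fffe123, then udelta = VG_PGROUNDUP(0x1edd) = 0x2000, so the
   reservation loses two pages at its top and the stack segment's
   base drops to 0x4fffe000. */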
static void (*fault_catcher)(Int sig, Addr addr) = NULL;

void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
{
   if (0)
      VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
   vg_assert2(NULL == catcher || NULL == fault_catcher,
              "Fault catcher is already registered");

   fault_catcher = catcher;
}
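/* Usage sketch (hypothetical catcher name): code that must probe
   possibly-unmapped memory -- memcheck's leak detector is the known
   client -- brackets the risky reads like so:

      static void my_catcher ( Int sig, Addr addr ) { ...recover... }

      VG_(set_fault_catcher)(my_catcher);
      ...scan memory that may fault...
      VG_(set_fault_catcher)(NULL);   // deregister when done

   The assertion above permits only one registered catcher at a time. */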
static
void sync_signalhandler_from_user ( ThreadId tid,
         Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId qtid;

   /* If some user-process sent us a sync signal (ie. it's not the result
      of a faulting instruction), then how we treat it depends on when it
      arrives... */

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      /* Signal arrived while we're blocked in a syscall.  This means
         that the client's signal mask was applied; in other words, we
         can't get here unless the client wants this signal right now.
         This means we can simply use the async_signalhandler. */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
                   sigNo);

      async_signalhandler(sigNo, info, uc);
      VG_(core_panic)("async_signalhandler returned!?\n");

   } else {
      /* Signal arrived while in generated client code, or while running
         Valgrind core code.  That means that every thread has these signals
         unblocked, so we can't rely on the kernel to route them properly, so
         we need to queue them manually. */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);

#     if defined(VGO_linux)
      /* On Linux, first we have to do a sanity check of the siginfo. */
      if (info->VKI_SIGINFO_si_pid == 0) {
         /* There's a per-user limit of pending siginfo signals.  If
            you exceed this, by having more than that number of
            pending signals with siginfo, then new signals are
            delivered without siginfo.  This condition can be caused
            by any unrelated program you're running at the same time
            as Valgrind, if it has a large number of pending siginfo
            signals which it isn't taking delivery of.

            Since we depend on siginfo to work out why we were sent a
            signal and what we should do about it, we really can't
            continue unless we get it. */
         VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
                   "I can't go on.\n", sigNo, signame(sigNo));
         VG_(printf)(
"  This may be because one of your programs has consumed your ration of\n"
"  siginfo structures.  For more information, see:\n"
"    http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
"  Basically, some program on your system is building up a large queue of\n"
"  pending signals, and this causes the siginfo data for other signals to\n"
"  be dropped because it's exceeding a system limit.  However, Valgrind\n"
"  absolutely needs siginfo for SIGSEGV.  A workaround is to track down the\n"
"  offending program and avoid running it while using Valgrind, but there\n"
"  is no easy way to do this.  Apparently the problem was fixed in kernel\n"
"  2.6.12.\n");

         /* It's a fatal signal, so we force the default handler. */
         VG_(set_default_handler)(sigNo);
         deliver_signal(tid, info, uc);
         resume_scheduler(tid);
         VG_(exit)(99); /* If we can't resume, then just exit */
      }
#     endif

      qtid = 0; /* shared pending by default */
#     if defined(VGO_linux)
      if (info->si_code == VKI_SI_TKILL)
         qtid = tid; /* directed to us specifically */
#     endif
      queue_signal(qtid, info);
   }
}
/* Returns True if the sync signal was due to the stack requiring extension
   and the extension was successful.
*/
static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
{
   Addr fault;
   Addr esp;
   NSegment const* seg;
   NSegment const* seg_next;

   if (info->si_signo != VKI_SIGSEGV)
      return False;

   fault    = (Addr)info->VKI_SIGINFO_si_addr;
   esp      = VG_(get_SP)(tid);
   seg      = VG_(am_find_nsegment)(fault);
   seg_next = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
                  : NULL;

   if (VG_(clo_trace_signals)) {
      if (seg == NULL)
         VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
                   "seg=NULL\n",
                   info->si_code, fault, tid, esp);
      else
         VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
                   "seg=%#lx-%#lx\n",
                   info->si_code, fault, tid, esp, seg->start, seg->end);
   }

   if (info->si_code == VKI_SEGV_MAPERR
       && seg
       && seg->kind == SkResvn
       && seg->smode == SmUpper
       && seg_next
       && seg_next->kind == SkAnonC
       && seg->end+1 == seg_next->start
       && fault >= (esp - VG_STACK_REDZONE_SZB)) {
      /* If the fault address is above esp but below the current known
         stack segment base, and it was a fault because there was
         nothing mapped there (as opposed to a permissions fault),
         then extend the stack segment.
       */
      Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
      if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
         if (VG_(clo_trace_signals))
            VG_(dmsg)("       -> extended stack base to %#lx\n",
                      VG_PGROUNDDN(fault));
         return True;
      } else {
         VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
                   tid, fault);
         return False;
      }
   } else {
      return False;
   }
}
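/* Numeric sketch of the guard above: assuming a 128-byte redzone
   (VG_STACK_REDZONE_SZB is platform-dependent; e.g. the amd64 ABI
   uses 128) and esp == 0x4ffff000, a MAPERR fault at or above
   0x4fffef80 in the reservation below the stack counts as stack
   growth, and the base requested from VG_(extend_stack) is
   VG_PGROUNDDN(0x4fffef80) = 0x4fffe000. */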
static
void sync_signalhandler_from_kernel ( ThreadId tid,
         Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
{
   /* Check to see if some part of Valgrind itself is interested in faults.
      The fault catcher should never be set whilst we're in generated code, so
      check for that.  AFAIK the only use of the catcher right now is
      memcheck's leak detector. */
   if (fault_catcher) {
      vg_assert(VG_(in_generated_code) == False);

      (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
      /* If the catcher returns, then it didn't handle the fault,
         so carry on panicking. */
   }

   if (extend_stack_if_appropriate(tid, info)) {
      /* Stack extension occurred, so we don't need to do anything else; upon
         returning from this function, we'll restart the host (hence guest)
         instruction. */
   } else {
      /* OK, this is a signal we really have to deal with.  If it came
         from the client's code, then we can jump back into the scheduler
         and have it delivered.  Otherwise it's a Valgrind bug. */
      ThreadState *tst = VG_(get_ThreadState)(tid);

      if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
         /* signal is blocked, but they're not allowed to block faults */
         VG_(set_default_handler)(sigNo);
      }

      if (VG_(in_generated_code)) {
         /* Can't continue; must longjmp back to the scheduler and thus
            enter the sighandler immediately. */
         deliver_signal(tid, info, uc);
         resume_scheduler(tid);
      }

      /* If resume_scheduler returns or it's our fault, it means we
         don't have longjmp set up, implying that we weren't running
         client code, and therefore it was actually generated by
         Valgrind internally.
       */
      VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
                "a signal %d (%s) - exiting\n",
                sigNo, signame(sigNo));

      VG_(dmsg)("si_code=%x;  Faulting address: %p;  sp: %#lx\n",
                info->si_code, info->VKI_SIGINFO_si_addr,
                VG_UCONTEXT_STACK_PTR(uc));

      if (0)
         VG_(kill_self)(sigNo); /* generate a core dump */

      //if (tid == 0)            /* could happen after everyone has exited */
      //  tid = VG_(master_tid);
      vg_assert(tid != 0);

      UnwindStartRegs startRegs;
      VG_(memset)(&startRegs, 0, sizeof(startRegs));

      VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
      VG_(core_panic_at)("Killed by fatal signal", &startRegs);
   }
}
/*
   Receive a sync signal from the host.
*/
static
void sync_signalhandler ( Int sigNo,
                          vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   Bool from_user;

   if (0)
      VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);

   vg_assert(info != NULL);
   vg_assert(info->si_signo == sigNo);
   vg_assert(sigNo == VKI_SIGSEGV ||
             sigNo == VKI_SIGBUS  ||
             sigNo == VKI_SIGFPE  ||
             sigNo == VKI_SIGILL  ||
             sigNo == VKI_SIGTRAP);

   info->si_code = sanitize_si_code(info->si_code);

   from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("sync signal handler: "
                "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
                sigNo, info->si_code, VG_(get_IP)(tid),
                VG_UCONTEXT_INSTR_PTR(uc),
                ( from_user ? "user" : "kernel" ));
   }
   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));

   if (0) {
      VG_(printf)("info->si_signo  %d\n", info->si_signo);
      VG_(printf)("info->si_errno  %d\n", info->si_errno);
      VG_(printf)("info->si_code   %d\n", info->si_code);
      VG_(printf)("info->si_pid    %d\n", info->si_pid);
      VG_(printf)("info->si_uid    %d\n", info->si_uid);
      VG_(printf)("info->si_status %d\n", info->si_status);
      VG_(printf)("info->si_addr   %p\n", info->si_addr);
   }

   /* Figure out if the signal is being sent from outside the process.
      If the signal is from the user rather than the kernel, then treat
      it more like an async signal than a sync signal -- that is,
      merely queue it for later delivery. */
   if (from_user) {
      sync_signalhandler_from_user  (tid, sigNo, info, uc);
   } else {
      sync_signalhandler_from_kernel(tid, sigNo, info, uc);
   }
}
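/* Note on the user-vs-kernel split above: is_signal_from_kernel()
   (defined earlier in this file) keys off si_code -- on Linux the
   kernel sets si_code > VKI_SI_USER for faults it raises itself,
   while kill()/tkill() arrive with user codes.  That one field
   decides which of the two handlers above runs. */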
/*
   Kill this thread.  Makes it leave any syscall it might be currently
   blocked in, and return to the scheduler.  This doesn't mark the thread
   as exiting; that's the caller's job.
 */
static void sigvgkill_handler(int signo, vki_siginfo_t *si,
                              struct vki_ucontext *uc)
{
   ThreadId     tid       = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadStatus at_signal = VG_(threads)[tid].status;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);

   VG_(acquire_BigLock)(tid, "sigvgkill_handler");

   vg_assert(signo == VG_SIGVGKILL);
   vg_assert(si->si_signo == signo);

   /* jrs 2006 August 3: the following assertion seems incorrect to
      me, and fails on AIX.  sigvgkill could be sent to a thread which
      is runnable - see VG_(nuke_all_threads_except) in the scheduler.
      Hence comment these out ..

         vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
         VG_(post_syscall)(tid);

      and instead do:
   */
   if (at_signal == VgTs_WaitSys)
      VG_(post_syscall)(tid);
   /* jrs 2006 August 3 ends */

   resume_scheduler(tid);

   VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
}
static __attribute((unused))
void pp_ksigaction ( vki_sigaction_toK_t* sa )
{
   Int i;
   VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
               sa->ksa_handler, (UInt)sa->sa_flags,
#              if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
                  !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
               sa->sa_restorer
#              else
               (void*)0
#              endif
               );
   VG_(printf)("pp_ksigaction: { ");
   for (i = 1; i <= VG_(max_signal); i++)
      if (VG_(sigismember(&(sa->sa_mask),i)))
         VG_(printf)("%d ", i);
   VG_(printf)("}\n");
}
/*
   Force signal handler to default
 */
void VG_(set_default_handler)(Int signo)
{
   vki_sigaction_toK_t sa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
      !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(do_sys_sigaction)(signo, &sa, NULL);
}
/*
   Poll for pending signals, and set the next one up for delivery.
 */
void VG_(poll_signals)(ThreadId tid)
{
   vki_siginfo_t si, *sip;
   vki_sigset_t  pollset;
   ThreadState  *tst = VG_(get_ThreadState)(tid);
   vki_sigset_t  saved_mask;

   /* look for all the signals this thread isn't blocking */
   /* pollset = ~tst->sig_mask */
   VG_(sigcomplementset)( &pollset, &tst->sig_mask );

   block_all_host_signals(&saved_mask); // protect signal queue

   /* First look for any queued pending signals */
   sip = next_queued(tid, &pollset); /* this thread */
   if (sip == NULL)
      sip = next_queued(0, &pollset); /* process-wide */

   /* If there was nothing queued, ask the kernel for a pending signal */
   if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
      if (VG_(clo_trace_signals))
         VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
                   si.si_signo, tid);
      sip = &si;
   }

   if (sip != NULL) {
      /* OK, something to do; deliver it */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
      if (!is_sig_ign(sip->si_signo))
         deliver_signal(tid, sip, NULL);
      else if (VG_(clo_trace_signals))
         VG_(dmsg)("   signal %d ignored\n", sip->si_signo);

      sip->si_signo = 0; /* remove from signal queue, if that's
                            where it came from */
   }

   restore_all_host_signals(&saved_mask);
}
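/* Delivery priority implemented above, highest first:
      1. signals queued specifically for this thread,
      2. process-wide queued signals,
      3. signals pending in the kernel (via VG_(sigtimedwait_zero)).
   At most one signal is set up per call; anything still pending is
   picked up by a later poll. */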
/* At startup, copy the process' real signal state to the SCSS.
   Whilst doing this, block all real signals.  Then calculate SKSS and
   set the kernel to that.  Also initialise DCSS.
*/
void VG_(sigstartup_actions) ( void )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   Int i, ret, vKI_SIGRTMIN;
   vki_sigset_t saved_procmask;
   vki_sigaction_fromK_t sa;

   VG_(memset)(&scss, 0, sizeof(scss));
   VG_(memset)(&skss, 0, sizeof(skss));

#  if defined(VKI_SIGRTMIN)
   vKI_SIGRTMIN = VKI_SIGRTMIN;
#  else
   vKI_SIGRTMIN = 0; /* eg Darwin */
#  endif

   /* VG_(printf)("SIGSTARTUP\n"); */
   /* Block all signals.  saved_procmask remembers the previous mask,
      which the first thread inherits.
   */
   block_all_host_signals( &saved_procmask );

   /* Copy per-signal settings to SCSS. */
   for (i = 1; i <= _VKI_NSIG; i++) {
      /* Get the old host action */
      ret = VG_(sigaction)(i, NULL, &sa);

#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      /* apparently we may not even ask about the disposition of these
         signals, let alone change them */
      if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
         continue;
#     endif

      if (ret != 0)
         break;

      /* Try setting it back to see if this signal is really
         available */
      if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
          && i >= vKI_SIGRTMIN) {
         vki_sigaction_toK_t tsa, sa2;

         tsa.ksa_handler = (void *)sync_signalhandler;
         tsa.sa_flags = VKI_SA_SIGINFO;
#        if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
            !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
         tsa.sa_restorer = 0;
#        endif
         VG_(sigfillset)(&tsa.sa_mask);

         /* try setting it to some arbitrary handler */
         if (VG_(sigaction)(i, &tsa, NULL) != 0) {
            /* failed - not really usable */
            break;
         }

         VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
         ret = VG_(sigaction)(i, &sa2, NULL);
         vg_assert(ret == 0);
      }

      VG_(max_signal) = i;

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(printf)("snaffling handler 0x%lx for signal %d\n",
                     (Addr)(sa.ksa_handler), i );

      scss.scss_per_sig[i].scss_handler  = sa.ksa_handler;
      scss.scss_per_sig[i].scss_flags    = sa.sa_flags;
      scss.scss_per_sig[i].scss_mask     = sa.sa_mask;

      scss.scss_per_sig[i].scss_restorer = NULL;
#     if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
#     endif

      scss.scss_per_sig[i].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_sa_tramp = NULL;
      /* We can't know what it was, because Darwin's sys_sigaction
         doesn't tell us. */
#     endif
   }

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));

   /* Our private internal signals are treated as ignored */
   scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
   scss.scss_per_sig[VG_SIGVGKILL].scss_flags   = VKI_SA_SIGINFO;
   VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);

   /* Copy the process' signal mask into the root thread. */
   vg_assert(VG_(threads)[1].status == VgTs_Init);
   for (i = 2; i < VG_N_THREADS; i++)
      vg_assert(VG_(threads)[i].status == VgTs_Empty);

   VG_(threads)[1].sig_mask     = saved_procmask;
   VG_(threads)[1].tmp_sig_mask = saved_procmask;

   /* Calculate SKSS and apply it.  This also sets the initial kernel
      mask we need to run with. */
   handle_SCSS_change( True /* forced update */ );

   /* Leave with all signals still blocked; the thread scheduler loop
      will set the appropriate mask at the appropriate time. */
#endif
}
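/* Note on the loop above: VG_(max_signal) is discovered empirically.
   For each candidate signal at or above SIGRTMIN we try installing a
   dummy handler; if sigaction() refuses, the signal doesn't really
   exist and the scan stops.  The original disposition is reinstated
   straight away, so the probe is invisible to the client. */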
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/