2 /*--------------------------------------------------------------------*/
3 /*--- Implementation of POSIX signals. m_signals.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2000-2010 Julian Seward
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 The GNU General Public License is contained in the file COPYING.
34 There are 4 distinct classes of signal:
36 1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
37 TRAP): these are signals as a result of an instruction fault. If
38 we get one while running client code, then we just do the
39 appropriate thing. If it happens while running Valgrind code, then
40 it indicates a Valgrind bug. Note that we "manually" implement
41 automatic stack growth, such that if a fault happens near the
42 client process stack, it is extended in the same way the kernel
43 would, and the fault is never reported to the client program.
45 2. Asynchronous variants of the above signals: If the kernel tries
46 to deliver a sync signal while it is blocked, it just kills the
47 process. Therefore, we can't block those signals if we want to be
48 able to report on bugs in Valgrind. This means that we're also
49 open to receiving those signals from other processes, sent with
50 kill. We could get away with just dropping them, since they aren't
51 really signals that processes send to each other.
53 3. Synchronous, general signals. If a thread/process sends itself
54 a signal with kill, it's expected to be synchronous: ie, the signal
55 will have been delivered by the time the syscall finishes.
57 4. Asynchronous, general signals. All other signals, sent by
58 another process with kill. These are generally blocked, except for
59 two special cases: we poll for them each time we're about to run a
60 thread for a time quantum, and while running blocking syscalls.
63 In addition, we reserve one signal for internal use: SIGVGKILL.
64 SIGVGKILL is used to terminate threads. When one thread wants
65 another to exit, it will set its exitreason and send it SIGVGKILL
66 if it appears to be blocked in a syscall.
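   As a rough sketch of that hand-off (illustrative only; the exact
   field and helper spellings below are assumptions, not a quote of the
   real code elsewhere in Valgrind):

      victim->exitreason = VgSrc_ExitThread;         // record why it must die
      if (victim->status == VgTs_WaitSys)            // stuck in a syscall?
         VG_(tkill)(victim->os_state.lwpid, VG_SIGVGKILL);   // interrupt it

   The sigvgkill_handler defined below then simply bounces the victim
   back into the scheduler, which notices the exitreason and tears the
   thread down.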
69 We use a kernel thread for each application thread. When the
70 thread allows itself to be open to signals, it sets the thread
71 signal mask to what the client application set it to. This means
72 that we get the kernel to do all signal routing: under Valgrind,
73 signals get delivered in the same way as in the non-Valgrind case
74 (the exception being for the sync signal set, since they're almost
81 First off, we take note of the client's requests (via sys_sigaction
82 and sys_sigprocmask) to set the signal state (handlers for each
83 signal, which are process-wide, + a mask for each signal, which is
84 per-thread). This info is duly recorded in the SCSS (static Client
85 signal state) in m_signals.c, and if the client later queries what
86 the state is, we merely fish the relevant info out of SCSS and give
89 However, we set the real signal state in the kernel to something
90 entirely different. This is recorded in SKSS, the static Kernel
91 signal state. What's nice (to the extent that anything is nice w.r.t
92 signals) is that there's a pure function to calculate SKSS from SCSS,
93 calculate_SKSS_from_SCSS. So when the client changes SCSS then we
94 recompute the associated SKSS and apply any changes from the previous
95 SKSS through to the kernel.
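   Condensed to its essentials, that update step looks roughly like this
   (a sketch of handle_SCSS_change further down in this file, with the
   construction of the kernel sigaction 'ksa' elided):

      SKSS skss_old = skss;
      calculate_SKSS_from_SCSS(&skss);             // pure function of SCSS
      for (sig = 1; sig <= VG_(max_signal); sig++) {
         if (skss_old.skss_per_sig[sig].skss_handler
                == skss.skss_per_sig[sig].skss_handler
             && skss_old.skss_per_sig[sig].skss_flags
                == skss.skss_per_sig[sig].skss_flags)
            continue;                               // no change for this signal
         // ksa is built from skss.skss_per_sig[sig] (elided here)
         VG_(sigaction)(sig, &ksa, &ksa_old);       // push the change to the kernel
      }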
97 Now, that said, the general scheme we have now is that, regardless of
98 what the client puts into the SCSS (viz, asks for), what we would
99 like to do is as follows:
101 (1) run code on the virtual CPU with all signals blocked
103 (2) at convenient moments for us (that is, when the VCPU stops, and
104 control is back with the scheduler), ask the kernel "do you have
105 any signals for me?" and if it does, collect up the info, and
106 deliver them to the client (by building sigframes).
108 And that's almost what we do. The signal polling is done by
109 VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
110 do the dirty work. (of which more later).
112 By polling signals, rather than catching them, we get to deal with
113 them only at convenient moments, rather than having to recover from
114 taking a signal while generated code is running.
116 Now unfortunately .. the above scheme only works for so-called async
117 signals. An async signal is one which isn't associated with any
118 particular instruction, eg Control-C (SIGINT). For those, it doesn't
119 matter if we don't deliver the signal to the client immediately; it
120 only matters that we deliver it eventually. Hence polling is OK.
122 But the other group -- sync signals -- are all related by the fact
123 that they are various ways for the host CPU to fail to execute an
124 instruction: SIGILL, SIGSEGV, SIGFPE. And they can't be deferred,
125 because obviously if a host instruction can't execute, well then we
126 have to immediately do Plan B, whatever that is.
128 So the next approximation of what happens is:
130 (1) run code on vcpu with all async signals blocked
132 (2) at convenient moments (when NOT running the vcpu), poll for async
135 (1) and (2) together imply that if the host does deliver a signal to
136 async_signalhandler while the VCPU is running, something's seriously wrong.
139 (3) when running code on vcpu, don't block sync signals. Instead
140 register sync_signalhandler and catch any such via that. Of
141 course, that means an ugly recovery path if we do -- the
142 sync_signalhandler has to longjump, exiting out of the generated
143 code, and the assembly-dispatcher thingy that runs it, and gets
144 caught in m_scheduler, which then tells m_signals to deliver the signal.
147 Now naturally (ha ha) even that might be tolerable, but there's
148 something worse: dealing with signals delivered to threads in
151 Obviously from the above, SKSS's signal mask (viz, what we really run
152 with) is way different from SCSS's signal mask (viz, what the client
153 thread thought it asked for). (eg) It may well be that the client
154 did not block control-C, so that it just expects to drop dead if it
155 receives ^C whilst blocked in a syscall, but by default we are
156 running with all async signals blocked, and so that signal could be
157 arbitrarily delayed, or perhaps even lost (not sure).
159 So what we have to do, when doing any syscall which SfMayBlock, is to
160 quickly switch in the SCSS-specified signal mask just before the
161 syscall, and switch it back just afterwards, and hope that we don't
162 get caught up in some weird race condition. This is the primary
163 purpose of the ultra-magical pieces of assembly code in
164 coregrind/m_syswrap/syscall-<plat>.S
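   Expressed in C rather than assembly, the intent is roughly the
   following (a sketch only; it has to be done in hand-written assembly
   so that an interrupting signal can be attributed to a precise point
   in the mask-switch / syscall / mask-restore sequence; the syscall
   helper name here is a hypothetical stand-in):

      vki_sigset_t saved_mask;
      VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, &saved_mask);
      res = do_the_blocking_syscall(...);    // may block; may be interrupted
      VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_mask, NULL);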
168 The ways in which V can come to hear of signals that need to be
169 forwarded to the client are as follows:
171 sync signals: can arrive at any time whatsoever. These are caught
172 by sync_signalhandler
176 if running generated code
177 then these are blocked, so we don't expect to catch them in
181 if thread is blocked in a syscall marked SfMayBlock
182 then signals may be delivered to async_sighandler, since we
183 temporarily unblocked them for the duration of the syscall,
184 by using the real (SCSS) mask for this thread
186 else we're doing misc housekeeping activities (eg, making a translation,
187 washing our hair, etc). As in the normal case, these signals are
188 blocked, but we can and do poll for them using VG_(poll_signals).
190 Now, re VG_(poll_signals), it polls the kernel by doing
191 VG_(sigtimedwait_zero). This is trivial on Linux, since it's just a
192 syscall. But on Darwin and AIX, we have to cobble together the
193 functionality in a tedious, longwinded and probably error-prone way.
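   The Linux case is conceptually just this (a sketch; the real
   VG_(poll_signals) also has to merge per-thread and process-wide
   queues, and 'pollset' here stands for whatever set of signals we are
   currently prepared to accept):

      vki_siginfo_t si;
      Int sigNo = VG_(sigtimedwait_zero)(&pollset, &si);   // never blocks
      if (sigNo > 0)
         deliver_signal(tid, &si, NULL);    // found one: hand it to the client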
196 #include "pub_core_basics.h"
197 #include "pub_core_vki.h"
198 #include "pub_core_vkiscnums.h"
199 #include "pub_core_debuglog.h"
200 #include "pub_core_threadstate.h"
201 #include "pub_core_xarray.h"
202 #include "pub_core_clientstate.h"
203 #include "pub_core_aspacemgr.h"
204 #include "pub_core_debugger.h" // For VG_(start_debugger)
205 #include "pub_core_errormgr.h"
206 #include "pub_core_libcbase.h"
207 #include "pub_core_libcassert.h"
208 #include "pub_core_libcprint.h"
209 #include "pub_core_libcproc.h"
210 #include "pub_core_libcsignal.h"
211 #include "pub_core_machine.h"
212 #include "pub_core_mallocfree.h"
213 #include "pub_core_options.h"
214 #include "pub_core_scheduler.h"
215 #include "pub_core_signals.h"
216 #include "pub_core_sigframe.h" // For VG_(sigframe_create)()
217 #include "pub_core_stacks.h" // For VG_(change_stack)()
218 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
219 #include "pub_core_syscall.h"
220 #include "pub_core_syswrap.h"
221 #include "pub_core_tooliface.h"
222 #include "pub_core_coredump.h"
225 /* ---------------------------------------------------------------------
227 ------------------------------------------------------------------ */
229 static void sync_signalhandler ( Int sigNo, vki_siginfo_t *info,
230 struct vki_ucontext * );
231 static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
232 struct vki_ucontext * );
233 static void sigvgkill_handler ( Int sigNo, vki_siginfo_t *info,
234 struct vki_ucontext * );
236 static const Char *signame(Int sigNo);
238 /* Maximum usable signal. */
239 Int VG_(max_signal) = _VKI_NSIG;
241 #define N_QUEUED_SIGNALS 8
243 typedef struct SigQueue {
245 vki_siginfo_t sigs[N_QUEUED_SIGNALS];
248 /* ------ Macros for pulling stuff out of ucontexts ------ */
250 /* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do? A: let's suppose the
251 machine context (uc) reflects the situation that a syscall had just
252 completed, quite literally -- that is, that the program counter was
253 now at the instruction following the syscall. (or we're slightly
254 downstream, but we're sure no relevant register has yet changed
255 value.) Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
256 the result of the syscall; it does this by fishing relevant bits of
257 the machine state out of the uc. Of course if the program counter
258 was somewhere else entirely then the result is likely to be
259 meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
260 very careful to pay attention to the results only when it is sure
261 that the said constraint on the program counter is indeed valid. */
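/* A small usage sketch (not a quote of the real caller): assuming the
   PC constraint above holds, the syscall-interruption fixup can recover
   the interrupted syscall's outcome like this:

      SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
      if (sr_isError(sres))
         (void)sr_Err(sres);   // the syscall failed; this is its error code
      else
         (void)sr_Res(sres);   // it succeeded; this is its result
*/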
263 #if defined(VGP_x86_linux)
264 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.eip)
265 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.esp)
266 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
267 /* Convert the value in uc_mcontext.eax into a SysRes. */ \
268 VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
269 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
270 { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip); \
271 (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp); \
272 (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp; \
275 #elif defined(VGP_amd64_linux)
276 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.rip)
277 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.rsp)
278 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
279 /* Convert the value in uc_mcontext.rax into a SysRes. */ \
280 VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
281 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
282 { (srP)->r_pc = (uc)->uc_mcontext.rip; \
283 (srP)->r_sp = (uc)->uc_mcontext.rsp; \
284 (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
287 #elif defined(VGP_ppc32_linux)
288 /* Comments from Paul Mackerras 25 Nov 05:
290 > I'm tracking down a problem where V's signal handling doesn't
291 > work properly on a ppc440gx running 2.4.20. The problem is that
292 > the ucontext being presented to V's sighandler seems completely
295 > V's kernel headers and hence ucontext layout are derived from
296 > 2.6.9. I compared include/asm-ppc/ucontext.h from 2.4.20 and
299 > Can I just check my interpretation: the 2.4.20 one contains the
300 > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
301 > to said struct? And so if V is using the 2.6.13 struct then a
302 > 2.4.20 one will make no sense to it.
304 Not quite... what is inline in the 2.4.20 version is a
305 sigcontext_struct, not an mcontext. The sigcontext looks like
308 struct sigcontext_struct {
309 unsigned long _unused[4];
311 unsigned long handler;
312 unsigned long oldmask;
313 struct pt_regs *regs;
316 The regs pointer of that struct ends up at the same offset as the
317 uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
318 same as the mc_gregs field of the mcontext. In fact the integer
319 regs are followed in memory by the floating point regs on 2.4.20.
321 Thus if you are using the 2.6 definitions, it should work on 2.4.20
322 provided that you go via uc->uc_regs rather than looking in
323 uc->uc_mcontext directly.
325 There is another subtlety: 2.4.20 doesn't save the vector regs when
326 delivering a signal, and 2.6.x only saves the vector regs if the
327 process has ever used an altivec instruction. If 2.6.x does save
328 the vector regs, it sets the MSR_VEC bit in
329 uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it. That bit
330 will always be clear under 2.4.20. So you can use that bit to tell
331 whether uc->uc_regs->mc_vregs is valid. */
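/* A sketch of that validity check (assuming VKI_PT_MSR is defined
   alongside VKI_PT_NIP and friends; 0x02000000 is MSR_VEC, bit 25 of
   the MSR -- a named constant from the vki headers would be preferable
   if one is available):

      Bool vregs_valid
         = ((uc)->uc_regs->mc_gregs[VKI_PT_MSR] & 0x02000000) != 0;
*/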
332 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
333 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
334 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
335 /* Convert the values in uc_mcontext r3,cr into a SysRes. */ \
336 VG_(mk_SysRes_ppc32_linux)( \
337 (uc)->uc_regs->mc_gregs[VKI_PT_R3], \
338 (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1) \
340 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
341 { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]); \
342 (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]); \
343 (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK]; \
346 #elif defined(VGP_ppc64_linux)
347 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
348 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
349 /* Dubious hack: if there is an error, only consider the lowest 8
350 bits of r3. memcheck/tests/post-syscall shows a case where an
351 interrupted syscall should have produced a ucontext with 0x4
352 (VKI_EINTR) in r3 but is in fact producing 0x204. */
353 /* Awaiting clarification from PaulM. Evidently 0x204 is
354 ERESTART_RESTARTBLOCK, which shouldn't have made it into user
356 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
358 ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
359 ULong r3 = uc->uc_mcontext.gp_regs[VKI_PT_R3];
361 return VG_(mk_SysRes_ppc64_linux)( r3, err );
363 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
364 { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP]; \
365 (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1]; \
366 (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK]; \
369 #elif defined(VGP_arm_linux)
370 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.arm_pc)
371 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.arm_sp)
372 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
373 /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
374 VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
375 # define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
376 { (srP)->r_pc = (uc)->uc_mcontext.arm_pc; \
377 (srP)->r_sp = (uc)->uc_mcontext.arm_sp; \
378 (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
379 (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
380 (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
383 #elif defined(VGP_ppc32_aix5)
385 /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
386 # include <ucontext.h>
387 /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
388 static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
389 ucontext_t* uc = (ucontext_t*)ucV;
390 struct __jmpbuf* mc = &(uc->uc_mcontext);
391 struct mstsave* jc = &mc->jmp_context;
394 static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
395 ucontext_t* uc = (ucontext_t*)ucV;
396 struct __jmpbuf* mc = &(uc->uc_mcontext);
397 struct mstsave* jc = &mc->jmp_context;
400 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
401 ucontext_t* uc = (ucontext_t*)ucV;
402 struct __jmpbuf* mc = &(uc->uc_mcontext);
403 struct mstsave* jc = &mc->jmp_context;
404 return VG_(mk_SysRes_ppc32_aix5)( jc->gpr[3], jc->gpr[4] );
406 static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
407 ucontext_t* uc = (ucontext_t*)ucV;
408 struct __jmpbuf* mc = &(uc->uc_mcontext);
409 struct mstsave* jc = &mc->jmp_context;
412 static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
413 return VG_UCONTEXT_STACK_PTR(ucV);
416 #elif defined(VGP_ppc64_aix5)
418 /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
419 # include <ucontext.h>
420 /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
421 static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
422 ucontext_t* uc = (ucontext_t*)ucV;
423 struct __jmpbuf* mc = &(uc->uc_mcontext);
424 struct __context64* jc = &mc->jmp_context;
427 static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
428 ucontext_t* uc = (ucontext_t*)ucV;
429 struct __jmpbuf* mc = &(uc->uc_mcontext);
430 struct __context64* jc = &mc->jmp_context;
433 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
434 ucontext_t* uc = (ucontext_t*)ucV;
435 struct __jmpbuf* mc = &(uc->uc_mcontext);
436 struct __context64* jc = &mc->jmp_context;
437 return VG_(mk_SysRes_ppc64_aix5)( jc->gpr[3], jc->gpr[4] );
439 static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
440 ucontext_t* uc = (ucontext_t*)ucV;
441 struct __jmpbuf* mc = &(uc->uc_mcontext);
442 struct __context64* jc = &mc->jmp_context;
445 static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
446 return VG_UCONTEXT_STACK_PTR(ucV);
449 #elif defined(VGP_x86_darwin)
451 static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
452 ucontext_t* uc = (ucontext_t*)ucV;
453 struct __darwin_mcontext32* mc = uc->uc_mcontext;
454 struct __darwin_i386_thread_state* ss = &mc->__ss;
457 static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
458 ucontext_t* uc = (ucontext_t*)ucV;
459 struct __darwin_mcontext32* mc = uc->uc_mcontext;
460 struct __darwin_i386_thread_state* ss = &mc->__ss;
463 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
465 /* this is complicated by the problem that there are 3 different
466 kinds of syscalls, each with its own return convention.
467 NB: scclass is a host word, hence UWord is good for both
468 amd64-darwin and x86-darwin */
469 ucontext_t* uc = (ucontext_t*)ucV;
470 struct __darwin_mcontext32* mc = uc->uc_mcontext;
471 struct __darwin_i386_thread_state* ss = &mc->__ss;
472 /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
473 UInt carry = 1 & ss->__eflags;
478 case VG_DARWIN_SYSCALL_CLASS_UNIX:
483 case VG_DARWIN_SYSCALL_CLASS_MACH:
486 case VG_DARWIN_SYSCALL_CLASS_MDEP:
493 return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
497 void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
499 ucontext_t* uc = (ucontext_t*)(ucV);
500 struct __darwin_mcontext32* mc = uc->uc_mcontext;
501 struct __darwin_i386_thread_state* ss = &mc->__ss;
502 srP->r_pc = (ULong)(ss->__eip);
503 srP->r_sp = (ULong)(ss->__esp);
504 srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
507 #elif defined(VGP_amd64_darwin)
509 static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
512 static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
515 static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
519 static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
520 return 0; /* No, really. We have no LRs today. */
522 static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
526 #elif defined(VGO_l4re)
527 # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.eip)
528 # define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.esp)
529 # define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.ebp)
530 # define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.eax)
531 # define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
532 /* Convert the value in uc_mcontext.eax into a SysRes. */ \
533 VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
534 # define VG_UCONTEXT_LINK_REG(uc) 0 /* Dude, where's my LR? */
538 # error Unknown platform
542 /* ------ Macros for pulling stuff out of siginfos ------ */
544 /* These macros allow use of uniform names when working with
545 the Linux, AIX, Darwin and L4Re vki definitions. */
546 #if defined(VGO_linux)
547 # define VKI_SIGINFO_si_addr _sifields._sigfault._addr
548 # define VKI_SIGINFO_si_pid _sifields._kill._pid
549 #elif defined(VGO_aix5)
550 # define VKI_SIGINFO_si_addr si_addr
551 # define VKI_SIGINFO_si_pid si_pid
552 #elif defined(VGO_darwin)
553 # define VKI_SIGINFO_si_addr si_addr
554 # define VKI_SIGINFO_si_pid si_pid
555 #elif defined(VGO_l4re)
556 # define VKI_SIGINFO_si_addr _sifields._sigfault._addr
557 # define VKI_SIGINFO_si_pid _sifields._kill._pid
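/* A trivial usage sketch: with these in place, OS-independent code in
   this file can say, for example:

      Addr fault_addr = (Addr) info->VKI_SIGINFO_si_addr;
      Int  sender_pid = info->VKI_SIGINFO_si_pid;
*/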
563 /* ---------------------------------------------------------------------
564 HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
565 ------------------------------------------------------------------ */
567 /* ---------------------------------------------------------------------
568 Signal state for this process.
569 ------------------------------------------------------------------ */
572 /* Base-ment of these arrays[_VKI_NSIG].
574 Valid signal numbers are 1 .. _VKI_NSIG inclusive.
575 Rather than subtracting 1 for indexing these arrays, which
576 is tedious and error-prone, they are simply dimensioned 1 larger,
577 and entry [0] is not used.
581 /* -----------------------------------------------------
582 Static client signal state (SCSS). This is the state
583 that the client thinks it has the kernel in.
584 SCSS records verbatim the client's settings. These
585 are mashed around only when SKSS is calculated from it.
586 -------------------------------------------------- */
590 void* scss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
593 vki_sigset_t scss_mask;
594 void* scss_restorer; /* where sigreturn goes */
595 void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
596 /* re _restorer and _sa_tramp, we merely record the values
597 supplied when the client does 'sigaction' and give them back
598 when requested. Otherwise they are simply ignored. */
604 /* per-signal info */
605 SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];
607 /* Additional elements to SCSS not stored here:
608 - for each thread, the thread's blocking mask
609 - for each thread in WaitSIG, the set of waited-on sigs
617 /* -----------------------------------------------------
618 Static kernel signal state (SKSS). This is the state
619 that we have the kernel in. It is computed from SCSS.
620 -------------------------------------------------- */
623 sigprocmask assigns to all thread masks
624 so that at least everything is always consistent
626 SA_SIGINFO -- we always set it, and honour it for the client
627 SA_NOCLDSTOP -- passed to kernel
628 SA_ONESHOT or SA_RESETHAND -- pass through
629 SA_RESTART -- we observe this but set our handlers to always restart
630 SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
631 SA_ONSTACK -- pass through
632 SA_NOCLDWAIT -- pass through
638 void* skss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN
639 or ptr to our handler */
641 /* There is no skss_mask, since we know that we will always ask
642 for all signals to be blocked in our sighandlers. */
643 /* Also there is no skss_restorer. */
649 SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
655 static Bool is_sig_ign(Int sigNo)
657 vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);
659 return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN;
662 /* ---------------------------------------------------------------------
663 Compute the SKSS required by the current SCSS.
664 ------------------------------------------------------------------ */
667 void pp_SKSS ( void )
670 VG_(printf)("\n\nSKSS:\n");
671 for (sig = 1; sig <= _VKI_NSIG; sig++) {
672 VG_(printf)("sig %d: handler %p, flags 0x%x\n", sig,
673 skss.skss_per_sig[sig].skss_handler,
674 skss.skss_per_sig[sig].skss_flags );
679 /* This is the core, clever bit. Computation is as follows:
682 handler = if client has a handler, then our handler
683 else if client is DFL, then our handler as well
684 else (client must be IGN)
688 void calculate_SKSS_from_SCSS ( SKSS* dst )
694 for (sig = 1; sig <= _VKI_NSIG; sig++) {
698 scss_handler = scss.scss_per_sig[sig].scss_handler;
699 scss_flags = scss.scss_per_sig[sig].scss_flags;
707 /* For these, we always want to catch them and report, even
708 if the client code doesn't. */
709 skss_handler = sync_signalhandler;
713 /* Let the kernel handle SIGCONT unless the client is actually
718 /* For signals which have a default action of Ignore,
719 only set a handler if the client has set a signal handler.
720 Otherwise the kernel will interrupt a syscall which
721 wouldn't have otherwise been interrupted. */
722 if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
723 skss_handler = VKI_SIG_DFL;
724 else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
725 skss_handler = VKI_SIG_IGN;
727 skss_handler = async_signalhandler;
731 // VKI_SIGVG* are runtime variables, so we can't make them
732 // cases in the switch, so we handle them in the 'default' case.
733 if (sig == VG_SIGVGKILL)
734 skss_handler = sigvgkill_handler;
736 if (scss_handler == VKI_SIG_IGN)
737 skss_handler = VKI_SIG_IGN;
739 skss_handler = async_signalhandler;
748 /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
749 skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);
751 /* SA_ONESHOT: ignore client setting */
753 /* SA_RESTART: ignore client setting and always set it for us.
754 Though we never rely on the kernel to restart a
755 syscall, we observe whether it wanted to restart the syscall
756 or not, which is needed by
757 VG_(fixup_guest_state_after_syscall_interrupted) */
758 skss_flags |= VKI_SA_RESTART;
760 /* SA_NOMASK: ignore it */
762 /* SA_ONSTACK: client setting is irrelevant here */
763 /* We don't set a signal stack, so ignore */
765 /* always ask for SA_SIGINFO */
766 skss_flags |= VKI_SA_SIGINFO;
768 /* use our own restorer */
769 skss_flags |= VKI_SA_RESTORER;
771 /* Create SKSS entry for this signal. */
772 if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
773 dst->skss_per_sig[sig].skss_handler = skss_handler;
775 dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;
777 dst->skss_per_sig[sig].skss_flags = skss_flags;
781 vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
782 vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);
789 /* ---------------------------------------------------------------------
790 After a possible SCSS change, update SKSS and the kernel itself.
791 ------------------------------------------------------------------ */
793 // We need two levels of macro-expansion here to convert __NR_rt_sigreturn
794 // to a number before converting it to a string... sigh.
795 extern void my_sigreturn(void);
797 #if defined(VGP_x86_linux)
798 # define _MY_SIGRETURN(name) \
801 " movl $" #name ", %eax\n" \
805 #elif defined(VGP_amd64_linux)
806 # define _MY_SIGRETURN(name) \
809 " movq $" #name ", %rax\n" \
813 #elif defined(VGP_ppc32_linux)
814 # define _MY_SIGRETURN(name) \
817 " li 0, " #name "\n" \
821 #elif defined(VGP_ppc64_linux)
822 # define _MY_SIGRETURN(name) \
824 ".globl my_sigreturn\n" \
825 ".section \".opd\",\"aw\"\n" \
828 ".quad .my_sigreturn,.TOC.@tocbase,0\n" \
830 ".type .my_sigreturn,@function\n" \
831 ".globl .my_sigreturn\n" \
833 " li 0, " #name "\n" \
836 #elif defined(VGP_arm_linux)
837 # define _MY_SIGRETURN(name) \
839 "my_sigreturn:\n\t" \
840 " mov r7, #" #name "\n\t" \
841 " svc 0x00000000\n" \
844 #elif defined(VGP_ppc32_aix5)
845 # define _MY_SIGRETURN(name) \
846 ".globl my_sigreturn\n" \
849 #elif defined(VGP_ppc64_aix5)
850 # define _MY_SIGRETURN(name) \
851 ".globl my_sigreturn\n" \
855 #elif defined(VGP_x86_darwin)
856 # define _MY_SIGRETURN(name) \
859 "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
862 #elif defined(VGP_amd64_darwin)
864 # define _MY_SIGRETURN(name) \
869 #elif defined(VGO_l4re)
870 # define _MY_SIGRETURN(name) \
871 ".globl my_sigreturn\n" \
876 # error Unknown platform
879 #define MY_SIGRETURN(name) _MY_SIGRETURN(name)
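/* Why the two-level MY_SIGRETURN/_MY_SIGRETURN dance?  '#name' inside
   _MY_SIGRETURN stringifies its argument as-is, so a single level would
   embed the literal text "__NR_rt_sigreturn" in the assembly rather than
   the syscall number.  A stand-alone illustration of the preprocessor
   trick (the names here are made up purely for illustration):

      #define NUM     42
      #define STR1(x) #x
      #define STR2(x) STR1(x)
      // STR1(NUM) expands to "NUM"; STR2(NUM) expands to "42"
*/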
881 MY_SIGRETURN(__NR_rt_sigreturn)
885 static void handle_SCSS_change ( Bool force_update )
889 vki_sigaction_toK_t ksa;
890 vki_sigaction_fromK_t ksa_old;
892 /* Remember old SKSS and calculate new one. */
894 calculate_SKSS_from_SCSS ( &skss );
896 /* Compare the new SKSS entries vs the old ones, and update kernel
897 where they differ. */
898 for (sig = 1; sig <= VG_(max_signal); sig++) {
900 /* Trying to do anything with SIGKILL is pointless; just ignore
902 if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
906 if ((skss_old.skss_per_sig[sig].skss_handler
907 == skss.skss_per_sig[sig].skss_handler)
908 && (skss_old.skss_per_sig[sig].skss_flags
909 == skss.skss_per_sig[sig].skss_flags))
914 ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
915 ksa.sa_flags = skss.skss_per_sig[sig].skss_flags;
916 # if !defined(VGP_ppc32_linux) && \
917 !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
918 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
919 ksa.sa_restorer = my_sigreturn;
921 /* Re above ifdef (also the assertion below), PaulM says:
922 The sa_restorer field is not used at all on ppc. Glibc
923 converts the sigaction you give it into a kernel sigaction,
924 but it doesn't put anything in the sa_restorer field.
927 /* block all signals in handler */
928 VG_(sigfillset)( &ksa.sa_mask );
929 VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
930 VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );
932 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
933 VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
934 "mask(msb..lsb) 0x%llx 0x%llx\n",
935 sig, ksa.ksa_handler,
937 _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
938 (ULong)ksa.sa_mask.sig[0]);
940 res = VG_(sigaction)( sig, &ksa, &ksa_old );
943 /* Since we got the old sigaction more or less for free, might
944 as well extract the maximum sanity-check value from it. */
946 vg_assert(ksa_old.ksa_handler
947 == skss_old.skss_per_sig[sig].skss_handler);
948 vg_assert(ksa_old.sa_flags
949 == skss_old.skss_per_sig[sig].skss_flags);
950 # if !defined(VGP_ppc32_linux) && \
951 !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
952 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
953 vg_assert(ksa_old.sa_restorer
956 VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
957 VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
958 vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
964 /* ---------------------------------------------------------------------
965 Update/query SCSS in accordance with client requests.
966 ------------------------------------------------------------------ */
968 /* Logic for this alt-stack stuff copied directly from do_sigaltstack
969 in kernel/signal.[ch] */
971 /* True if we are on the alternate signal stack. */
972 static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
974 ThreadState *tst = VG_(get_ThreadState)(tid);
976 return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
979 static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
981 ThreadState *tst = VG_(get_ThreadState)(tid);
983 return (tst->altstack.ss_size == 0
984 ? VKI_SS_DISABLE
985 : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
989 SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
993 vg_assert(VG_(is_valid_tid)(tid));
994 m_SP = VG_(get_SP)(tid);
996 if (VG_(clo_trace_signals))
997 VG_(emsg)("sys_sigaltstack: tid %d, "
998 "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
1001 (ULong)(ss ? ss->ss_size : 0),
1002 (ULong)(ss ? ss->ss_flags : 0),
1003 (void*)oss, (void*)m_SP);
1006 oss->ss_sp = VG_(threads)[tid].altstack.ss_sp;
1007 oss->ss_size = VG_(threads)[tid].altstack.ss_size;
1008 oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
1009 | sas_ss_flags(tid, m_SP);
1013 if (on_sig_stack(tid, VG_(get_SP)(tid))) {
1014 return VG_(mk_SysRes_Error)( VKI_EPERM );
1016 if (ss->ss_flags != VKI_SS_DISABLE
1017 && ss->ss_flags != VKI_SS_ONSTACK
1018 && ss->ss_flags != 0) {
1019 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1021 if (ss->ss_flags == VKI_SS_DISABLE) {
1022 VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
1024 if (ss->ss_size < VKI_MINSIGSTKSZ) {
1025 return VG_(mk_SysRes_Error)( VKI_ENOMEM );
1028 VG_(threads)[tid].altstack.ss_sp = ss->ss_sp;
1029 VG_(threads)[tid].altstack.ss_size = ss->ss_size;
1030 VG_(threads)[tid].altstack.ss_flags = 0;
1033 return VG_(mk_SysRes_Success)( 0 );
1037 SysRes VG_(do_sys_sigaction) ( Int signo,
1038 const vki_sigaction_toK_t* new_act,
1039 vki_sigaction_fromK_t* old_act )
1041 if (VG_(clo_trace_signals))
1042 VG_(emsg)("sys_sigaction: sigNo %d, "
1043 "new %#lx, old %#lx, new flags 0x%llx\n",
1044 signo, (UWord)new_act, (UWord)old_act,
1045 (ULong)(new_act ? new_act->sa_flags : 0));
1047 /* Rule out various error conditions. The aim is to ensure that if
1048 when the call is passed to the kernel it will definitely
1051 /* Reject out-of-range signal numbers. */
1052 if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;
1054 /* don't let them use our signals */
1055 if ( (signo > VG_SIGVGRTUSERMAX)
1057 && !(new_act->ksa_handler == VKI_SIG_DFL
1058 || new_act->ksa_handler == VKI_SIG_IGN) )
1059 goto bad_signo_reserved;
1061 /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
1062 if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
1064 && new_act->ksa_handler != VKI_SIG_DFL)
1065 goto bad_sigkill_or_sigstop;
1067 /* If the client supplied non-NULL old_act, copy the relevant SCSS
1070 old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
1071 old_act->sa_flags = scss.scss_per_sig[signo].scss_flags;
1072 old_act->sa_mask = scss.scss_per_sig[signo].scss_mask;
1073 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
1074 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
1075 old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
1079 /* And now copy new SCSS entry from new_act. */
1081 scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
1082 scss.scss_per_sig[signo].scss_flags = new_act->sa_flags;
1083 scss.scss_per_sig[signo].scss_mask = new_act->sa_mask;
1085 scss.scss_per_sig[signo].scss_restorer = NULL;
1086 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
1087 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
1088 scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
1091 scss.scss_per_sig[signo].scss_sa_tramp = NULL;
1092 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1093 scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
1096 VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
1097 VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
1100 /* All happy bunnies ... */
1102 handle_SCSS_change( False /* lazy update */ );
1104 return VG_(mk_SysRes_Success)( 0 );
1107 if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
1108 VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
1110 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1113 if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
1114 VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
1116 VG_(umsg)(" the %s signal is used internally by Valgrind\n",
1119 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1121 bad_sigkill_or_sigstop:
1122 if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
1123 VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
1125 VG_(umsg)(" the %s signal is uncatchable\n",
1128 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1133 void do_sigprocmask_bitops ( Int vki_how,
1134 vki_sigset_t* orig_set,
1135 vki_sigset_t* modifier )
1139 VG_(sigaddset_from_set)( orig_set, modifier );
1141 case VKI_SIG_UNBLOCK:
1142 VG_(sigdelset_from_set)( orig_set, modifier );
1144 case VKI_SIG_SETMASK:
1145 *orig_set = *modifier;
1148 VG_(core_panic)("do_sigprocmask_bitops");
1154 HChar* format_sigset ( const vki_sigset_t* set )
1156 static HChar buf[128];
1159 VG_(strcpy)(buf, "");
1161 for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
1163 # if _VKI_NSIG_BPW == 32
1164 VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
1165 set ? (ULong)set->sig[w] : 0);
1166 # elif _VKI_NSIG_BPW == 64
1167 VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
1168 set ? (ULong)set->sig[w] : 0);
1170 # error "Unsupported value for _VKI_NSIG_BPW"
1178 This updates the thread's signal mask. There's no such thing as a
1179 process-wide signal mask.
1181 Note that the thread signal masks are an implicit part of SCSS,
1182 which is why this routine is allowed to mess with them.
1185 void do_setmask ( ThreadId tid,
1187 vki_sigset_t* newset,
1188 vki_sigset_t* oldset )
1190 if (VG_(clo_trace_signals))
1191 VG_(emsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
1193 how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
1194 how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
1195 how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
1196 newset, newset ? format_sigset(newset) : "NULL" );
1198 /* Just do this thread. */
1199 vg_assert(VG_(is_valid_tid)(tid));
1201 *oldset = VG_(threads)[tid].sig_mask;
1202 if (VG_(clo_trace_signals))
1203 VG_(emsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
1206 do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
1207 VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
1208 VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
1209 VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
1214 SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
1217 vki_sigset_t* oldset )
1221 case VKI_SIG_UNBLOCK:
1222 case VKI_SIG_SETMASK:
1223 vg_assert(VG_(is_valid_tid)(tid));
1224 do_setmask ( tid, how, set, oldset );
1225 return VG_(mk_SysRes_Success)( 0 );
1228 VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
1229 return VG_(mk_SysRes_Error)( VKI_EINVAL );
1234 /* ---------------------------------------------------------------------
1235 LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
1236 ------------------------------------------------------------------ */
1238 /* ---------------------------------------------------------------------
1239 Handy utilities to block/restore all host signals.
1240 ------------------------------------------------------------------ */
1242 /* Block all host signals, dumping the old mask in *saved_mask. */
1243 static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
1245 #if defined(VGO_l4re)
1246 VG_(unimplemented)((char *)__func__);
1249 vki_sigset_t block_procmask;
1250 VG_(sigfillset)(&block_procmask);
1251 ret = VG_(sigprocmask)
1252 (VKI_SIG_SETMASK, &block_procmask, saved_mask);
1253 vg_assert(ret == 0);
1257 /* Restore the blocking mask using the supplied saved one. */
1258 static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
1261 ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
1262 vg_assert(ret == 0);
1265 void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
1267 #if defined(VGO_l4re)
1268 VG_(unimplemented)((char *)__func__);
1270 block_all_host_signals(saved_mask);
1271 if (VG_(threads)[tid].sig_queue != NULL) {
1272 VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
1273 VG_(threads)[tid].sig_queue = NULL;
1275 restore_all_host_signals(saved_mask);
1279 /* ---------------------------------------------------------------------
1280 The signal simulation proper. A simplified version of what the
1282 ------------------------------------------------------------------ */
1284 /* Set up a stack frame (VgSigContext) for the client's signal
1287 void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
1288 const struct vki_ucontext *uc )
1290 #if defined(VGO_l4re)
1291 VG_(unimplemented)((char *)__func__);
1293 Addr esp_top_of_frame;
1295 Int sigNo = siginfo->si_signo;
1297 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
1298 vg_assert(VG_(is_valid_tid)(tid));
1299 tst = & VG_(threads)[tid];
1301 if (VG_(clo_trace_signals)) {
1302 VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
1303 VG_(get_and_pp_StackTrace)(tid, 10);
1306 if (/* this signal asked to run on an alt stack */
1307 (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
1308 && /* there is a defined and enabled alt stack, which we're not
1309 already using. Logic from get_sigframe in
1310 arch/i386/kernel/signal.c. */
1311 sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
1314 = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
1315 if (VG_(clo_trace_signals))
1316 VG_(dmsg)("delivering signal %d (%s) to thread %d: "
1317 "on ALT STACK (%p-%p; %ld bytes)\n",
1318 sigNo, signame(sigNo), tid, tst->altstack.ss_sp,
1319 (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
1320 (Word)tst->altstack.ss_size );
1322 /* Signal delivery to tools */
1323 VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );
1326 esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
1328 /* Signal delivery to tools */
1329 VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
1332 vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
1333 vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
1335 /* This may fail if the client stack is busted; if that happens,
1336 the whole process will exit rather than simply calling the
1338 VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
1339 scss.scss_per_sig[sigNo].scss_handler,
1340 scss.scss_per_sig[sigNo].scss_flags,
1342 scss.scss_per_sig[sigNo].scss_restorer);
1347 static const Char *signame(Int sigNo)
1349 static Char buf[20];
1352 case VKI_SIGHUP: return "SIGHUP";
1353 case VKI_SIGINT: return "SIGINT";
1354 case VKI_SIGQUIT: return "SIGQUIT";
1355 case VKI_SIGILL: return "SIGILL";
1356 case VKI_SIGTRAP: return "SIGTRAP";
1357 case VKI_SIGABRT: return "SIGABRT";
1358 case VKI_SIGBUS: return "SIGBUS";
1359 case VKI_SIGFPE: return "SIGFPE";
1360 case VKI_SIGKILL: return "SIGKILL";
1361 case VKI_SIGUSR1: return "SIGUSR1";
1362 case VKI_SIGUSR2: return "SIGUSR2";
1363 case VKI_SIGSEGV: return "SIGSEGV";
1364 case VKI_SIGPIPE: return "SIGPIPE";
1365 case VKI_SIGALRM: return "SIGALRM";
1366 case VKI_SIGTERM: return "SIGTERM";
1367 # if defined(VKI_SIGSTKFLT)
1368 case VKI_SIGSTKFLT: return "SIGSTKFLT";
1370 case VKI_SIGCHLD: return "SIGCHLD";
1371 case VKI_SIGCONT: return "SIGCONT";
1372 case VKI_SIGSTOP: return "SIGSTOP";
1373 case VKI_SIGTSTP: return "SIGTSTP";
1374 case VKI_SIGTTIN: return "SIGTTIN";
1375 case VKI_SIGTTOU: return "SIGTTOU";
1376 case VKI_SIGURG: return "SIGURG";
1377 case VKI_SIGXCPU: return "SIGXCPU";
1378 case VKI_SIGXFSZ: return "SIGXFSZ";
1379 case VKI_SIGVTALRM: return "SIGVTALRM";
1380 case VKI_SIGPROF: return "SIGPROF";
1381 case VKI_SIGWINCH: return "SIGWINCH";
1382 case VKI_SIGIO: return "SIGIO";
1383 # if defined(VKI_SIGPWR)
1384 case VKI_SIGPWR: return "SIGPWR";
1386 # if defined(VKI_SIGUNUSED)
1387 case VKI_SIGUNUSED: return "SIGUNUSED";
1390 # if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
1391 case VKI_SIGRTMIN ... VKI_SIGRTMAX:
1392 VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
1397 VG_(sprintf)(buf, "SIG%d", sigNo);
1402 /* Hit ourselves with a signal using the default handler */
1403 void VG_(kill_self)(Int sigNo)
1406 vki_sigset_t mask, origmask;
1407 vki_sigaction_toK_t sa, origsa2;
1408 vki_sigaction_fromK_t origsa;
1410 sa.ksa_handler = VKI_SIG_DFL;
1412 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
1413 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
1416 VG_(sigemptyset)(&sa.sa_mask);
1418 VG_(sigaction)(sigNo, &sa, &origsa);
1420 VG_(sigemptyset)(&mask);
1421 VG_(sigaddset)(&mask, sigNo);
1422 VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);
1424 r = VG_(kill)(VG_(getpid)(), sigNo);
1425 /* This sometimes fails with EPERM on Darwin. I don't know why. */
1426 /* vg_assert(r == 0); */
1428 VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
1429 VG_(sigaction)(sigNo, &origsa2, NULL);
1430 VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
1433 // The si_code describes where the signal came from. Some come from the
1434 // kernel, eg.: seg faults, illegal opcodes. Some come from the user, eg.:
1435 // from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
1436 // request (SI_ASYNCIO). There's lots of implementation-defined leeway in
1437 POSIX, but the user vs. kernel distinction is what we want here. We also
1438 // pass in some other details that can help when si_code is unreliable.
1439 static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
1441 #if defined(VGO_linux) || defined(VGO_aix5)
1442 // On Linux, SI_USER is zero, negative values are from the user, positive
1443 // values are from the kernel. There are SI_FROMUSER and SI_FROMKERNEL
1444 // macros but we don't use them here because other platforms don't have
1446 return ( si_code > VKI_SI_USER ? True : False );
1447 #elif defined(VGO_darwin)
1448 // On Darwin 9.6.0, the si_code is completely unreliable. It should be the
1449 // case that 0 means "user", and >0 means "kernel". But:
1450 // - For SIGSEGV, it seems quite reliable.
1451 // - For SIGBUS, it's always 2.
1452 // - For SIGFPE, it's often 0, even for kernel ones (eg.
1453 // div-by-integer-zero always gives zero).
1454 // - For SIGILL, it's unclear.
1455 // - For SIGTRAP, it's always 1.
1456 // You can see the "NOTIMP" (not implemented) status of a number of the
1457 // sub-cases in sys/signal.h. Hopefully future versions of Darwin will
1460 // If we're blocked waiting on a syscall, it must be a user signal, because
1461 // the kernel won't generate sync signals within syscalls.
1462 if (VG_(threads)[tid].status == VgTs_WaitSys) {
1465 // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
1466 } else if (VKI_SIGSEGV == signum) {
1467 return ( si_code > 0 ? True : False );
1469 // If it's anything else, assume it's kernel-generated. Reason being that
1470 // kernel-generated sync signals are more common, and it's probable that
1471 // misdiagnosing a user signal as a kernel signal is better than the
1476 #elif defined(VGO_l4re)
1483 // This is an arbitrary si_code that we only use internally. It corresponds
1484 // to the value SI_KERNEL on Linux, but that's not really of any significance
1485 // as far as I can determine.
1486 #define VKI_SEGV_MADE_UP_GPF 0x80
1489 Perform the default action of a signal. If the signal is fatal, it
1490 marks all threads as needing to exit, but it doesn't actually kill
1491 the process or thread.
1493 If we're not being quiet, then print out some more detail about
1494 fatal signals (esp. core dumping signals).
1496 static void default_action(const vki_siginfo_t *info, ThreadId tid)
1498 Int sigNo = info->si_signo;
1499 Bool terminate = False; /* kills process */
1500 Bool core = False; /* kills process w/ core */
1501 struct vki_rlimit corelim;
1504 vg_assert(VG_(is_running_thread)(tid));
1507 case VKI_SIGQUIT: /* core */
1508 case VKI_SIGILL: /* core */
1509 case VKI_SIGABRT: /* core */
1510 case VKI_SIGFPE: /* core */
1511 case VKI_SIGSEGV: /* core */
1512 case VKI_SIGBUS: /* core */
1513 case VKI_SIGTRAP: /* core */
1514 case VKI_SIGXCPU: /* core */
1515 case VKI_SIGXFSZ: /* core */
1520 case VKI_SIGHUP: /* term */
1521 case VKI_SIGINT: /* term */
1522 case VKI_SIGKILL: /* term - we won't see this */
1523 case VKI_SIGPIPE: /* term */
1524 case VKI_SIGALRM: /* term */
1525 case VKI_SIGTERM: /* term */
1526 case VKI_SIGUSR1: /* term */
1527 case VKI_SIGUSR2: /* term */
1528 case VKI_SIGIO: /* term */
1529 # if defined(VKI_SIGPWR)
1530 case VKI_SIGPWR: /* term */
1532 case VKI_SIGSYS: /* term */
1533 case VKI_SIGPROF: /* term */
1534 case VKI_SIGVTALRM: /* term */
1535 # if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
1536 case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
1542 vg_assert(!core || (core && terminate));
1544 if (VG_(clo_trace_signals))
1545 VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
1546 sigNo, info->si_code, terminate ? "terminate" : "ignore",
1547 core ? "+core" : "");
1550 return; /* nothing to do */
1555 /* If they set the core-size limit to zero, don't generate a
1558 VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);
1560 if (corelim.rlim_cur == 0)
1564 if ( (VG_(clo_verbosity) > 1 ||
1565 (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
1570 "Process terminating with default action of signal %d (%s)%s\n",
1571 sigNo, signame(sigNo), core ? ": dumping core" : "");
1573 /* Be helpful - decode some more details about this fault */
1574 if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
1575 const Char *event = NULL;
1576 Bool haveaddr = True;
1580 switch(info->si_code) {
1581 case VKI_SEGV_MAPERR: event = "Access not within mapped region";
1583 case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
1585 case VKI_SEGV_MADE_UP_GPF:
1586 /* General Protection Fault: The CPU/kernel
1587 isn't telling us anything useful, but this
1588 is commonly the result of exceeding a
1590 event = "General Protection Fault";
1597 VG_(am_show_nsegments)(0,"post segfault");
1598 VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1605 switch(info->si_code) {
1606 case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
1607 case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
1608 case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
1609 case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
1610 case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
1611 case VKI_ILL_PRVREG: event = "Privileged register"; break;
1612 case VKI_ILL_COPROC: event = "Coprocessor error"; break;
1613 case VKI_ILL_BADSTK: event = "Internal stack error"; break;
1618 switch (info->si_code) {
1619 case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
1620 case VKI_FPE_INTOVF: event = "Integer overflow"; break;
1621 case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
1622 case VKI_FPE_FLTOVF: event = "FP overflow"; break;
1623 case VKI_FPE_FLTUND: event = "FP underflow"; break;
1624 case VKI_FPE_FLTRES: event = "FP inexact"; break;
1625 case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
1626 case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
1631 switch (info->si_code) {
1632 case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
1633 case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
1634 case VKI_BUS_OBJERR: event = "Hardware error"; break;
1637 } /* switch (sigNo) */
1639 if (event != NULL) {
1641 VG_(umsg)(" %s at address %p\n",
1642 event, info->VKI_SIGINFO_si_addr);
1644 VG_(umsg)(" %s\n", event);
1647 /* Print a stack trace. Be cautious if the thread's SP is in an
1648 obviously stupid place (not mapped readable) that would
1649 likely cause a segfault. */
1650 if (VG_(is_valid_tid)(tid)) {
1651 ExeContext* ec = VG_(am_is_valid_for_client)
1652 (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
1653 ? VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ )
1654 : VG_(record_depth_1_ExeContext)( tid );
1656 VG_(pp_ExeContext)( ec );
1658 if (sigNo == VKI_SIGSEGV
1659 && info && is_signal_from_kernel(tid, sigNo, info->si_code)
1660 && info->si_code == VKI_SEGV_MAPERR) {
1661 VG_(umsg)(" If you believe this happened as a result of a stack\n" );
1662 VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
1663 VG_(umsg)(" possible), you can try to increase the size of the\n" );
1664 VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
1665 // FIXME: assumes main ThreadId == 1
1666 if (VG_(is_valid_tid)(1)) {
1668 " The main thread stack size used in this run was %d.\n",
1669 (Int)VG_(threads)[1].client_stack_szB);
1674 if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
1675 VG_(start_debugger)( tid );
1679 static const struct vki_rlimit zero = { 0, 0 };
1681 VG_(make_coredump)(tid, info, corelim.rlim_cur);
1683 /* Make sure we don't get a confusing kernel-generated
1684 coredump when we finally exit */
1685 VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
1688 /* stash fatal signal in main thread */
1690 //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;
1693 VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
1694 VG_(threads)[tid].exitreason = VgSrc_FatalSig;
1695 VG_(threads)[tid].os_state.fatalsig = sigNo;
1699 This does the business of delivering a signal to a thread. It may
1700 be called from either a real signal handler, or from normal code to
1701 cause the thread to enter the signal handler.
1703 This updates the thread state, but it does not set it to be
1706 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
1707 const struct vki_ucontext *uc )
1709 #if defined(VGO_l4re)
1710 VG_(unimplemented)((char *)__func__);
1712 Int sigNo = info->si_signo;
1713 SCSS_Per_Signal *handler = &scss.scss_per_sig[sigNo];
1715 ThreadState *tst = VG_(get_ThreadState)(tid);
1717 if (VG_(clo_trace_signals))
1718 VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
1719 sigNo, signame(sigNo), info->si_code, tid );
1721 if (sigNo == VG_SIGVGKILL) {
1722 /* If this is a SIGVGKILL, we're expecting it to interrupt any
1723 blocked syscall. It doesn't matter whether the VCPU state is
1724 set to restart or not, because we don't expect it will
1725 execute any more client instructions. */
1726 vg_assert(VG_(is_exiting)(tid));
1730 /* If the client specifies SIG_IGN, treat it as SIG_DFL.
1732 If deliver_signal() is being called on a thread, we want
1733 the signal to get through no matter what; if they're ignoring
1734 it, then we do this override (this is so we can send it SIGSEGV,
1736 handler_fn = handler->scss_handler;
1737 if (handler_fn == VKI_SIG_IGN)
1738 handler_fn = VKI_SIG_DFL;
1740 vg_assert(handler_fn != VKI_SIG_IGN);
1742 if (handler_fn == VKI_SIG_DFL) {
1743 default_action(info, tid);
1745 /* Create a signal delivery frame, and set the client's %ESP and
1746 %EIP so that when execution continues, we will enter the
1747 signal handler with the frame on top of the client's stack,
1750 Signal delivery can fail if the client stack is too small or
1751 missing, and we can't push the frame. If that happens,
1752 push_signal_frame will cause the whole process to exit when
1753 we next hit the scheduler.
1755 vg_assert(VG_(is_valid_tid)(tid));
1757 push_signal_frame ( tid, info, uc );
1759 if (handler->scss_flags & VKI_SA_ONESHOT) {
1760 /* Do the ONESHOT thing. */
1761 handler->scss_handler = VKI_SIG_DFL;
1763 handle_SCSS_change( False /* lazy update */ );
1767 tst->sig_mask is the current signal mask
1768 tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
1769 handler->scss_mask is the mask set by the handler
1771 Handler gets a mask of tmp_sig_mask|handler_mask|signo
1773 tst->sig_mask = tst->tmp_sig_mask;
1774 if (!(handler->scss_flags & VKI_SA_NOMASK)) {
1775 VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
1776 VG_(sigaddset)(&tst->sig_mask, sigNo);
1777 tst->tmp_sig_mask = tst->sig_mask;
1782 /* Thread state is ready to go - just add Runnable */
1785 static void resume_scheduler(ThreadId tid)
1787 ThreadState *tst = VG_(get_ThreadState)(tid);
1789 vg_assert(tst->os_state.lwpid == VG_(gettid)());
1791 if (tst->sched_jmpbuf_valid) {
1792 /* Can't continue; must longjmp back to the scheduler and thus
1793 enter the sighandler immediately. */
1794 __builtin_longjmp(tst->sched_jmpbuf, True);
1798 static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
1800 #if defined(VGO_l4re)
1801 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
1802 VG_(message)(Vg_DebugMsg,"%s(tid=%d, addr=%p, si_code=%d)\n", __func__, tid, addr, si_code);
1804 enter_kdebug("synth_fault_common");
1809 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1811 VG_(memset)(&info, 0, sizeof(info));
1812 info.si_signo = VKI_SIGSEGV;
1813 info.si_code = si_code;
1814 info.VKI_SIGINFO_si_addr = (void*)addr;
1816 /* If they're trying to block the signal, force it to be delivered */
1817 if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
1818 VG_(set_default_handler)(VKI_SIGSEGV);
1820 deliver_signal(tid, &info, NULL);
1824 // Synthesize a fault where the address is OK, but the page
1825 // permissions are bad.
1826 void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
1828 synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
1831 // Synthesize a fault where there's nothing mapped at the address.
1832 void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
1834 synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
1837 // Synthesize a misc memory fault.
1838 void VG_(synth_fault)(ThreadId tid)
1840 synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
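/* Illustrative sketch, not part of the original source: how a caller
   elsewhere in the core might use these entry points on noticing that
   the client touched memory it must not; the function and its
   arguments are hypothetical. */
#if 0
static void example_report_bad_access ( ThreadId tid, Addr bad_addr,
                                        Bool is_mapped )
{
   if (is_mapped)
      VG_(synth_fault_perms)(tid, bad_addr);    /* page mapped, perms wrong */
   else
      VG_(synth_fault_mapping)(tid, bad_addr);  /* nothing mapped there     */
}
#endif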
1843 // Synthesise a SIGILL.
1844 void VG_(synth_sigill)(ThreadId tid, Addr addr)
1848 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1850 VG_(memset)(&info, 0, sizeof(info));
1851 info.si_signo = VKI_SIGILL;
1852 info.si_code = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
1853 info.VKI_SIGINFO_si_addr = (void*)addr;
1855 resume_scheduler(tid);
1856 deliver_signal(tid, &info, NULL);
1859 // Synthesise a SIGBUS.
1860 void VG_(synth_sigbus)(ThreadId tid)
1864 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1866 VG_(memset)(&info, 0, sizeof(info));
1867 info.si_signo = VKI_SIGBUS;
1868 /* There are several meanings to SIGBUS (as per POSIX, presumably),
1869 but the most widely understood is "invalid address alignment",
1870 so let's use that. */
1871 info.si_code = VKI_BUS_ADRALN;
1872 /* If we knew the invalid address in question, we could put it
1873 in .si_addr. Oh well. */
1874 /* info.VKI_SIGINFO_si_addr = (void*)addr; */
1876 resume_scheduler(tid);
1877 deliver_signal(tid, &info, NULL);
1880 // Synthesise a SIGTRAP.
1881 void VG_(synth_sigtrap)(ThreadId tid)
1884 struct vki_ucontext uc;
1885 # if defined(VGP_x86_darwin)
1886 struct __darwin_mcontext32 mc;
1887 # elif defined(VGP_amd64_darwin)
1888 struct __darwin_mcontext64 mc;
1891 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1893 VG_(memset)(&info, 0, sizeof(info));
1894 VG_(memset)(&uc, 0, sizeof(uc));
1895 info.si_signo = VKI_SIGTRAP;
1896 info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
1898 # if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
1899 uc.uc_mcontext.trapno = 3; /* tjh: this is the x86 trap number
1900 for a breakpoint trap... */
1901 uc.uc_mcontext.err = 0; /* tjh: no error code for x86
1902 breakpoint trap... */
1903 # elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1904 /* the same thing, but using Darwin field/struct names */
1905 VG_(memset)(&mc, 0, sizeof(mc));
1906 uc.uc_mcontext = &mc;
1907 uc.uc_mcontext->__es.__trapno = 3;
1908 uc.uc_mcontext->__es.__err = 0;
1911 resume_scheduler(tid);
1912 deliver_signal(tid, &info, &uc);
1915 /* Make a signal pending for a thread, for later delivery.
1916 VG_(poll_signals) will arrange for it to be delivered at the right
1919 tid==0 means add it to the process-wide queue, and not send it to a specific thread.
1923 void queue_signal(ThreadId tid, const vki_siginfo_t *si)
1927 vki_sigset_t savedmask;
1929 tst = VG_(get_ThreadState)(tid);
1931 /* Protect the signal queue against async deliveries */
1932 block_all_host_signals(&savedmask);
1934 if (tst->sig_queue == NULL) {
1935 tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
1936 sizeof(*tst->sig_queue));
1937 VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
1939 sq = tst->sig_queue;
1941 if (VG_(clo_trace_signals))
1942 VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
1943 si->si_signo, sq->next, tid);
1945 /* Add signal to the queue. If the queue gets overrun, then old
1946 queued signals may get lost.
1948 XXX We should also keep a sigset of pending signals, so that at
1949 least a non-siginfo signal gets delivered.
1951 if (sq->sigs[sq->next].si_signo != 0)
1952 VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
1953 sq->sigs[sq->next].si_signo, tid);
1955 sq->sigs[sq->next] = *si;
1956 sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
1958 restore_all_host_signals(&savedmask);
1962 Returns the next queued signal for thread tid which is in "set".
1963 tid==0 means process-wide signal. Set si_signo to 0 when the
1964 signal has been delivered.
1966 Must be called with all signals blocked, to protect against async deliveries.
1969 static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
1971 ThreadState *tst = VG_(get_ThreadState)(tid);
1974 vki_siginfo_t *ret = NULL;
1976 sq = tst->sig_queue;
1983 VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
1984 sq->sigs[idx].si_signo,
1985 VG_(sigismember)(set, sq->sigs[idx].si_signo));
1987 if (sq->sigs[idx].si_signo != 0
1988 && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
1989 if (VG_(clo_trace_signals))
1990 VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
1991 sq->sigs[idx].si_signo, idx, tid);
1992 ret = &sq->sigs[idx];
1996 idx = (idx + 1) % N_QUEUED_SIGNALS;
1997 } while(idx != sq->next);
2002 static int sanitize_si_code(int si_code)
2004 #if defined(VGO_linux) || defined(VGO_l4re)
2005 /* The linux kernel uses the top 16 bits of si_code for its own
2006 use and only exports the bottom 16 bits to user space - at least
2007 that is the theory, but it turns out that there are some kernels
2008 around that forget to mask out the top 16 bits so we do it here.
2010 The kernel treats the bottom 16 bits as signed and (when it does
2011 mask them off) sign extends them when exporting to user space so
2012 we do the same thing here. */
2013 return (Short)si_code;
2014 #elif defined(VGO_aix5) || defined(VGO_darwin)
   return si_code;
#  endif
}
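/* Illustrative sketch, not part of the original source: the effect of
   the Linux branch above.  Only the bottom 16 bits of si_code survive,
   sign-extended, so a value carrying kernel-private junk in the top
   half comes out the same as from a kernel that masks correctly. */
#if 0
static void example_sanitize_si_code ( void )
{
   int clean = sanitize_si_code(0x00010002); /* == 2 on Linux: top bits dropped */
   int neg   = sanitize_si_code(0x00008000); /* == -32768: sign bit extended    */
   (void)clean; (void)neg;
}
#endif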
2022 Receive an async signal from the kernel.
2024 This should only happen when the thread is blocked in a syscall,
2025 since that's the only time this set of signals is unblocked.
2028 void async_signalhandler ( Int sigNo,
2029 vki_siginfo_t *info, struct vki_ucontext *uc )
2031 #if !defined(VGO_l4re)
2032 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2033 ThreadState* tst = VG_(get_ThreadState)(tid);
2036 /* The thread isn't currently running, make it so before going on */
2037 vg_assert(tst->status == VgTs_WaitSys);
2038 VG_(acquire_BigLock)(tid, "async_signalhandler");
2040 info->si_code = sanitize_si_code(info->si_code);
2042 if (VG_(clo_trace_signals))
2043 VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
2044 sigNo, tid, info->si_code);
2046 /* Update thread state properly. The signal can only have been
2047 delivered whilst we were in
2048 coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
2049 window between the two sigprocmask calls, since at all other
2050 times, we run with async signals on the host blocked. Hence
2051 make enquiries on the basis that we were in or very close to a
2052 syscall, and attempt to fix up the guest state accordingly.
2054 (normal async signals occurring during computation are blocked,
2055 but periodically polled for using VG_(sigtimedwait_zero), and
2056 delivered at a point convenient for us. Hence this routine only
2057 deals with signals that are delivered to a thread during a blocking syscall.) */
2060 /* First, extract a SysRes from the ucontext_t* given to this
2061 handler. If it is subsequently established by
2062 VG_(fixup_guest_state_after_syscall_interrupted) that the
2063 syscall was complete but the results had not been committed yet
2064 to the guest state, then it'll have to commit the results itself
2065 "by hand", and so we need to extract the SysRes. Of course if
2066 the thread was not in that particular window then the
2067 SysRes will be meaningless, but that's OK too because
2068 VG_(fixup_guest_state_after_syscall_interrupted) will detect
2069 that the thread was not in said window and ignore the SysRes. */
2071 /* To make matters more complex still, on Darwin we need to know
2072 the "class" of the syscall under consideration in order to be
2073 able to extract a correct SysRes. The class will have been
2074 saved just before the syscall, by VG_(client_syscall), into this
2075 thread's tst->arch.vex.guest_SC_CLASS. Hence: */
2076 # if defined(VGO_darwin)
2077 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
2079 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
2083 VG_(fixup_guest_state_after_syscall_interrupted)(
2085 VG_UCONTEXT_INSTR_PTR(uc),
2087 !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
2091 /* Set up the thread's state to deliver a signal */
2092 if (!is_sig_ign(info->si_signo))
2093 deliver_signal(tid, info, uc);
2095 /* It's crucial that (1) and (2) happen in the order (1) then (2)
2096 and not the other way around. (1) fixes up the guest thread
2097 state to reflect the fact that the syscall was interrupted --
2098 either to restart the syscall or to return EINTR. (2) then sets
2099 up the thread state to deliver the signal. Then we resume
2100 execution. First, the signal handler is run, since that's the
2101 second adjustment we made to the thread state. If that returns,
2102 then we resume at the guest state created by (1), viz, either
2103 the syscall returns EINTR or is restarted.
2105 If (2) was done before (1) the outcome would be completely
2106 different, and wrong. */
2108 /* longjmp back to the thread's main loop to start executing the
2110 resume_scheduler(tid);
2112 VG_(core_panic)("async_signalhandler: got unexpected signal "
2113 "while outside of scheduler");
2115 VG_(unimplemented)("unimplemented function async_signalhandler()");
2119 /* Extend the stack to cover addr. maxsize is the limit the stack can grow to.
2121 Returns True on success, False on failure.
2123 Succeeds without doing anything if addr is already within a segment.
2125 Failure could be caused by:
2126 - addr not below a growable segment
2127 - new stack size would exceed maxsize
2128 - mmap failed for some other reason
2130 Bool VG_(extend_stack)(Addr addr, UInt maxsize)
2134 /* Find the next Segment above addr */
2136 = VG_(am_find_nsegment)(addr);
2137 NSegment const* seg_next
2138 = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2141 if (seg && seg->kind == SkAnonC)
2142 /* addr is already mapped. Nothing to do. */
      return True;
2145 /* Check that the requested new base is in a shrink-down
2146 reservation section which abuts an anonymous mapping that
2147 belongs to the client. */
   if (! (seg
          && seg->kind == SkResvn
          && seg->smode == SmUpper
          && seg_next
          && seg_next->kind == SkAnonC
          && seg->end+1 == seg_next->start))
      return False;
2156 udelta = VG_PGROUNDUP(seg_next->start - addr);
2157 VG_(debugLog)(1, "signals",
2158 "extending a stack base 0x%llx down by %lld\n",
2159 (ULong)seg_next->start, (ULong)udelta);
2160 if (! VG_(am_extend_into_adjacent_reservation_client)
2161 ( (NSegment*)seg_next, -(SSizeT)udelta )) {
2162 VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
2166 /* When we change the main stack, we have to let the stack handling
2167 code know about it. */
2168 VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
2170 if (VG_(clo_sanity_level) > 2)
2171 VG_(sanity_check_general)(False);
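/* Illustrative sketch, not part of the original source: the page
   arithmetic used above, assuming 4KB pages.  If the existing client
   stack segment starts at 0xBF000000 and the requested new base is
   0xBEFFE123, the reservation must shrink by
   VG_PGROUNDUP(0xBF000000 - 0xBEFFE123) = VG_PGROUNDUP(0x1EDD) =
   0x2000, i.e. two pages. */
#if 0
static SizeT example_stack_delta ( Addr seg_start, Addr new_base )
{
   return VG_PGROUNDUP(seg_start - new_base);
}
#endif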
2176 static void (*fault_catcher)(Int sig, Addr addr) = NULL;
2178 void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
2181 VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
2182 vg_assert2(NULL == catcher || NULL == fault_catcher,
2183 "Fault catcher is already registered");
2185 fault_catcher = catcher;
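/* Illustrative sketch, not part of the original source: the intended
   usage pattern for VG_(set_fault_catcher).  A scanner that must read
   possibly-unmapped memory (sync_signalhandler_from_kernel below notes
   that memcheck's leak detector is the only current user) installs a
   catcher, does the risky accesses, then removes it; all names here
   are hypothetical. */
#if 0
static void example_catcher ( Int sig, Addr addr )
{
   /* called instead of panicking; would typically longjmp back to the
      scanning loop rather than return */
}

static void example_risky_scan ( Addr a )
{
   VG_(set_fault_catcher)(example_catcher);
   /* ... read memory at 'a', which may fault ... */
   VG_(set_fault_catcher)(NULL);
}
#endif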
2189 void sync_signalhandler_from_user ( ThreadId tid,
2190 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2194 /* If some user-process sent us a sync signal (ie. it's not the result
2195 of a faulting instruction), then how we treat it depends on when it arrives. */
2198 if (VG_(threads)[tid].status == VgTs_WaitSys) {
2199 /* Signal arrived while we're blocked in a syscall. This means that
2200 the client's signal mask was applied. In other words, we can't
2201 get here unless the client wants this signal right now. This means
2202 we can simply use the async_signalhandler. */
2203 if (VG_(clo_trace_signals))
2204 VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
2207 async_signalhandler(sigNo, info, uc);
2208 VG_(core_panic)("async_signalhandler returned!?\n");
2211 /* Signal arrived while in generated client code, or while running
2212 Valgrind core code. That means that every thread has these signals
2213 unblocked, so we can't rely on the kernel to route them properly, so
2214 we need to queue them manually. */
2215 if (VG_(clo_trace_signals))
2216 VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);
2218 # if defined(VGO_linux)
2219 /* On Linux, first we have to do a sanity check of the siginfo. */
2220 if (info->VKI_SIGINFO_si_pid == 0) {
2221 /* There's a per-user limit of pending siginfo signals. If
2222 you exceed this, by having more than that number of
2223 pending signals with siginfo, then new signals are
2224 delivered without siginfo. This condition can be caused
2225 by any unrelated program you're running at the same time
2226 as Valgrind, if it has a large number of pending siginfo
2227 signals which it isn't taking delivery of.
2229 Since we depend on siginfo to work out why we were sent a
2230 signal and what we should do about it, we really can't
2231 continue unless we get it. */
2232 VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
2233 "I can't go on.\n", sigNo, signame(sigNo));
2235 " This may be because one of your programs has consumed your ration of\n"
2236 " siginfo structures. For more information, see:\n"
2237 " http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
2238 " Basically, some program on your system is building up a large queue of\n"
2239 " pending signals, and this causes the siginfo data for other signals to\n"
2240 " be dropped because it's exceeding a system limit. However, Valgrind\n"
2241 " absolutely needs siginfo for SIGSEGV. A workaround is to track down the\n"
2242 " offending program and avoid running it while using Valgrind, but there\n"
2243 " is no easy way to do this. Apparently the problem was fixed in kernel\n"
2246 /* It's a fatal signal, so we force the default handler. */
2247 VG_(set_default_handler)(sigNo);
2248 deliver_signal(tid, info, uc);
2249 resume_scheduler(tid);
2250 VG_(exit)(99); /* If we can't resume, then just exit */
2254 qtid = 0; /* shared pending by default */
2255 # if defined(VGO_linux)
2256 if (info->si_code == VKI_SI_TKILL)
2257 qtid = tid; /* directed to us specifically */
2259 queue_signal(qtid, info);
2263 /* Returns True if the sync signal was due to the stack requiring extension
2264 and the extension was successful.
2266 static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
2270 NSegment const* seg;
2271 NSegment const* seg_next;
2273 if (info->si_signo != VKI_SIGSEGV)
2276 fault = (Addr)info->VKI_SIGINFO_si_addr;
2277 esp = VG_(get_SP)(tid);
2278 seg = VG_(am_find_nsegment)(fault);
2279 seg_next = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2282 if (VG_(clo_trace_signals)) {
2284 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2286 info->si_code, fault, tid, esp);
2288 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2290 info->si_code, fault, tid, esp, seg->start, seg->end);
   if (info->si_code == VKI_SEGV_MAPERR
       && seg
       && seg->kind == SkResvn
       && seg->smode == SmUpper
       && seg_next
       && seg_next->kind == SkAnonC
       && seg->end+1 == seg_next->start
       && fault >= (esp - VG_STACK_REDZONE_SZB)) {
2301 /* If the fault address is above esp but below the current known
2302 stack segment base, and it was a fault because there was
2303 nothing mapped there (as opposed to a permissions fault),
2304 then extend the stack segment.
2306 Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
2307 if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
2308 if (VG_(clo_trace_signals))
2309 VG_(dmsg)(" -> extended stack base to %#lx\n",
2310 VG_PGROUNDDN(fault));
2313 VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
2323 void sync_signalhandler_from_kernel ( ThreadId tid,
2324 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2326 /* Check to see if some part of Valgrind itself is interested in faults.
2327 The fault catcher should never be set whilst we're in generated code, so
2328 check for that. AFAIK the only use of the catcher right now is
2329 memcheck's leak detector. */
2330 if (fault_catcher) {
2331 vg_assert(VG_(in_generated_code) == False);
2333 (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
2334 /* If the catcher returns, then it didn't handle the fault,
2335 so carry on panicking. */
2338 if (extend_stack_if_appropriate(tid, info)) {
2339 /* Stack extension occurred, so we don't need to do anything else; upon
2340 returning from this function, we'll restart the host (hence guest)
        instruction. */
   } else {
2343 /* OK, this is a signal we really have to deal with. If it came
2344 from the client's code, then we can jump back into the scheduler
2345 and have it delivered. Otherwise it's a Valgrind bug. */
2346 ThreadState *tst = VG_(get_ThreadState)(tid);
2348 if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
2349 /* signal is blocked, but they're not allowed to block faults */
2350 VG_(set_default_handler)(sigNo);
2353 if (VG_(in_generated_code)) {
2354 /* Can't continue; must longjmp back to the scheduler and thus
2355 enter the sighandler immediately. */
2356 deliver_signal(tid, info, uc);
2357 resume_scheduler(tid);
2360 /* If resume_scheduler returns or it's our fault, it means we
2361 don't have longjmp set up, implying that we weren't running
2362 client code, and therefore it was actually generated by
2363 Valgrind internally.
2365 VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
2366 "a signal %d (%s) - exiting\n",
2367 sigNo, signame(sigNo));
2369 VG_(dmsg)("si_code=%x; Faulting address: %p; sp: %#lx\n",
2370 info->si_code, info->VKI_SIGINFO_si_addr,
2371 VG_UCONTEXT_STACK_PTR(uc));
2374 VG_(kill_self)(sigNo); /* generate a core dump */
2376 //if (tid == 0) /* could happen after everyone has exited */
2377 // tid = VG_(master_tid);
2378 vg_assert(tid != 0);
2380 UnwindStartRegs startRegs;
2381 VG_(memset)(&startRegs, 0, sizeof(startRegs));
2383 VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
2384 VG_(core_panic_at)("Killed by fatal signal", &startRegs);
2389 Receive a sync signal from the host.
2392 void sync_signalhandler ( Int sigNo,
2393 vki_siginfo_t *info, struct vki_ucontext *uc )
2395 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2399 VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);
2401 vg_assert(info != NULL);
2402 vg_assert(info->si_signo == sigNo);
2403 vg_assert(sigNo == VKI_SIGSEGV ||
2404 sigNo == VKI_SIGBUS ||
2405 sigNo == VKI_SIGFPE ||
2406 sigNo == VKI_SIGILL ||
2407 sigNo == VKI_SIGTRAP);
2409 info->si_code = sanitize_si_code(info->si_code);
2411 from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);
2413 if (VG_(clo_trace_signals)) {
2414 VG_(dmsg)("sync signal handler: "
2415 "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
2416 sigNo, info->si_code, VG_(get_IP)(tid),
2417 VG_UCONTEXT_INSTR_PTR(uc),
2418 ( from_user ? "user" : "kernel" ));
2420 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
2424 VG_(printf)("info->si_signo %d\n", info->si_signo);
2425 VG_(printf)("info->si_errno %d\n", info->si_errno);
2426 VG_(printf)("info->si_code %d\n", info->si_code);
2427 VG_(printf)("info->si_pid %d\n", info->si_pid);
2428 VG_(printf)("info->si_uid %d\n", info->si_uid);
2429 VG_(printf)("info->si_status %d\n", info->si_status);
2430 VG_(printf)("info->si_addr %p\n", info->si_addr);
2434 /* Figure out if the signal is being sent from outside the process.
2435 (Why do we care?) If the signal is from the user rather than the
2436 kernel, then treat it more like an async signal than a sync signal --
2437 that is, merely queue it for later delivery. */
   if (from_user) {
      sync_signalhandler_from_user(  tid, sigNo, info, uc);
   } else {
      sync_signalhandler_from_kernel(tid, sigNo, info, uc);
   }
2447 Kill this thread. Makes it leave any syscall it might be currently
2448 blocked in, and return to the scheduler. This doesn't mark the thread
2449 as exiting; that's the caller's job.
2451 static void sigvgkill_handler(int signo, vki_siginfo_t *si,
2452 struct vki_ucontext *uc)
2454 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2455 ThreadStatus at_signal = VG_(threads)[tid].status;
2457 if (VG_(clo_trace_signals))
2458 VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);
2460 VG_(acquire_BigLock)(tid, "sigvgkill_handler");
2462 vg_assert(signo == VG_SIGVGKILL);
2463 vg_assert(si->si_signo == signo);
2465 /* jrs 2006 August 3: the following assertion seems incorrect to
2466 me, and fails on AIX. sigvgkill could be sent to a thread which
2467 is runnable - see VG_(nuke_all_threads_except) in the scheduler.
2468 Hence comment these out ..
2470 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
2471 VG_(post_syscall)(tid);
2475 if (at_signal == VgTs_WaitSys)
2476 VG_(post_syscall)(tid);
2477 /* jrs 2006 August 3 ends */
2479 resume_scheduler(tid);
2481 VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
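/* Illustrative sketch, not part of the original source: what the
   caller side is expected to look like.  Marking the thread as exiting
   is the caller's job, as noted above; the use of VG_(tkill) on the
   thread's lwpid is an assumption here. */
#if 0
static void example_kill_thread ( ThreadId tid )
{
   VG_(threads)[tid].exitreason = VgSrc_ExitThread;  /* caller marks it exiting */
   VG_(tkill)(VG_(threads)[tid].os_state.lwpid,
              VG_SIGVGKILL);                         /* interrupt its syscall   */
}
#endif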
2484 static __attribute((unused))
2485 void pp_ksigaction ( vki_sigaction_toK_t* sa )
2488 VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
2491 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2492 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2498 VG_(printf)("pp_ksigaction: { ");
2499 for (i = 1; i <= VG_(max_signal); i++)
2500 if (VG_(sigismember(&(sa->sa_mask),i)))
2501 VG_(printf)("%d ", i);
2506 Force signal handler to default
2508 void VG_(set_default_handler)(Int signo)
2510 vki_sigaction_toK_t sa;
2512 sa.ksa_handler = VKI_SIG_DFL;
2514 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2515 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2518 VG_(sigemptyset)(&sa.sa_mask);
2520 VG_(do_sys_sigaction)(signo, &sa, NULL);
2524 Poll for pending signals, and set the next one up for delivery.
2526 void VG_(poll_signals)(ThreadId tid)
2528 vki_siginfo_t si, *sip;
2529 vki_sigset_t pollset;
2530 ThreadState *tst = VG_(get_ThreadState)(tid);
2531 vki_sigset_t saved_mask;
2533 /* look for all the signals this thread isn't blocking */
2534 /* pollset = ~tst->sig_mask */
2535 VG_(sigcomplementset)( &pollset, &tst->sig_mask );
2537 block_all_host_signals(&saved_mask); // protect signal queue
2539 /* First look for any queued pending signals */
2540 sip = next_queued(tid, &pollset); /* this thread */
   if (sip == NULL)
      sip = next_queued(0, &pollset);   /* process-wide */
2545 /* If there was nothing queued, ask the kernel for a pending signal */
2546 if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
2547 if (VG_(clo_trace_signals))
2548 VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
2554 /* OK, something to do; deliver it */
2555 if (VG_(clo_trace_signals))
2556 VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
2557 if (!is_sig_ign(sip->si_signo))
2558 deliver_signal(tid, sip, NULL);
2559 else if (VG_(clo_trace_signals))
2560 VG_(dmsg)(" signal %d ignored\n", sip->si_signo);
2562 sip->si_signo = 0; /* remove from signal queue, if that's
2563 where it came from */
2566 restore_all_host_signals(&saved_mask);
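/* Illustrative sketch, not part of the original source: the expected
   call pattern.  The scheduler is assumed to poll once per thread
   time-slice, so queued and kernel-pending signals become deliveries
   at a point where the thread is runnable; the wrapper name is
   hypothetical. */
#if 0
static void example_timeslice_boundary ( ThreadId tid )
{
   VG_(poll_signals)(tid);   /* may set up a signal frame for tid */
}
#endif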
2569 /* At startup, copy the process' real signal state to the SCSS.
2570 Whilst doing this, block all real signals. Then calculate SKSS and
2571 set the kernel to that. Also initialise DCSS.
2573 void VG_(sigstartup_actions) ( void )
2575 #if defined(VGO_l4re)
2576 VG_(unimplemented)((char *)__func__);
2578 Int i, ret, vKI_SIGRTMIN;
2579 vki_sigset_t saved_procmask;
2580 vki_sigaction_fromK_t sa;
2582 VG_(memset)(&scss, 0, sizeof(scss));
2583 VG_(memset)(&skss, 0, sizeof(skss));
2585 # if defined(VKI_SIGRTMIN)
2586 vKI_SIGRTMIN = VKI_SIGRTMIN;
2588 vKI_SIGRTMIN = 0; /* eg Darwin */
2591 /* VG_(printf)("SIGSTARTUP\n"); */
2592 /* Block all signals. saved_procmask remembers the previous mask,
2593 which the first thread inherits.
2595 block_all_host_signals( &saved_procmask );
2597 /* Copy per-signal settings to SCSS. */
2598 for (i = 1; i <= _VKI_NSIG; i++) {
2599 /* Get the old host action */
2600 ret = VG_(sigaction)(i, NULL, &sa);
2602 # if defined(VGP_x86_darwin)
2603 /* apparently we may not even ask about the disposition of these
2604 signals, let alone change them */
2605 if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
2612 /* Try setting it back to see if this signal is really available */
2614 if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
2615 && i >= vKI_SIGRTMIN) {
2616 vki_sigaction_toK_t tsa, sa2;
2618 tsa.ksa_handler = (void *)sync_signalhandler;
2619 tsa.sa_flags = VKI_SA_SIGINFO;
2620 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2621 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2622 tsa.sa_restorer = 0;
2624 VG_(sigfillset)(&tsa.sa_mask);
2626 /* try setting it to some arbitrary handler */
2627 if (VG_(sigaction)(i, &tsa, NULL) != 0) {
2628 /* failed - not really usable */
2632 VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
2633 ret = VG_(sigaction)(i, &sa2, NULL);
2634 vg_assert(ret == 0);
2637 VG_(max_signal) = i;
2639 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
2640 VG_(printf)("snaffling handler 0x%lx for signal %d\n",
2641 (Addr)(sa.ksa_handler), i );
2643 scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
2644 scss.scss_per_sig[i].scss_flags = sa.sa_flags;
2645 scss.scss_per_sig[i].scss_mask = sa.sa_mask;
2647 scss.scss_per_sig[i].scss_restorer = NULL;
2648 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2649 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2650 scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
2653 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2654 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2655 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2657 /* We can't know what it was, because Darwin's sys_sigaction
2662 if (VG_(clo_trace_signals))
2663 VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));
2665 /* Our private internal signals are treated as ignored */
2666 scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
2667 scss.scss_per_sig[VG_SIGVGKILL].scss_flags = VKI_SA_SIGINFO;
2668 VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);
2670 /* Copy the process' signal mask into the root thread. */
2671 vg_assert(VG_(threads)[1].status == VgTs_Init);
2672 for (i = 2; i < VG_N_THREADS; i++)
2673 vg_assert(VG_(threads)[i].status == VgTs_Empty);
2675 VG_(threads)[1].sig_mask = saved_procmask;
2676 VG_(threads)[1].tmp_sig_mask = saved_procmask;
2678 /* Calculate SKSS and apply it. This also sets the initial kernel
2679 mask we need to run with. */
2680 handle_SCSS_change( True /* forced update */ );
2682 /* Leave with all signals still blocked; the thread scheduler loop
2683 will set the appropriate mask at the appropriate time. */
2687 /*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
2689 /*--------------------------------------------------------------------*/