/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2010 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Signal handling.

   There are 4 distinct classes of signal:

   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals as a result of an instruction fault.  If
   we get one while running client code, then we just do the
   appropriate thing.  If it happens while running Valgrind code, then
   it indicates a Valgrind bug.  Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.

   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process.  Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind.  This means that we're also
   open to receiving those signals from other processes, sent with
   kill.  We could get away with just dropping them, since they aren't
   really signals that processes send to each other.

   3. Synchronous, general signals.  If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: i.e., the
   signal will have been delivered by the time the syscall finishes.

   4. Asynchronous, general signals.  All other signals, sent by
   another process with kill.  These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.


   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads.  When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.
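
   As an illustrative sketch of that handshake (a hypothetical helper;
   the real logic lives in the scheduler, but it uses the VG_(tkill)
   wrapper and the exitreason/os_state fields shown here):

      static void ask_thread_to_die ( ThreadId tid )
      {
         ThreadState* tst = VG_(get_ThreadState)(tid);
         tst->exitreason = VgSrc_ExitThread;    // mark it as exiting
         if (tst->status == VgTs_WaitSys)       // blocked in a syscall?
            VG_(tkill)(tst->os_state.lwpid, VG_SIGVGKILL);
      }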

   We use a kernel thread for each application thread.  When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to.  This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).
*/

/*
   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread).  This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back.

   However, we set the real signal state in the kernel to something
   entirely different.  This is recorded in SKSS, the static Kernel
   signal state.  What's nice (to the extent that anything is nice
   w.r.t. signals) is that there's a pure function to calculate SKSS
   from SCSS, calculate_SKSS_from_SCSS.  So when the client changes
   SCSS we recompute the associated SKSS and apply any changes from
   the previous SKSS through to the kernel.
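
   In outline (a condensed sketch of handle_SCSS_change, which appears
   later in this file, using the names as they appear there):

      SKSS skss_old = skss;
      calculate_SKSS_from_SCSS ( &skss );
      for (sig = 1; sig <= VG_(max_signal); sig++) {
         if (skss_old.skss_per_sig[sig].skss_handler
                != skss.skss_per_sig[sig].skss_handler
             || skss_old.skss_per_sig[sig].skss_flags
                != skss.skss_per_sig[sig].skss_flags) {
            // push only the changed entries into the kernel
            VG_(sigaction)( sig, &ksa, &ksa_old );
         }
      }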

   Now, that said, the general scheme we have now is, that regardless
   of what the client puts into the SCSS (viz, asks for), what we
   would like to do is as follows:

   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
      control is back with the scheduler), ask the kernel "do you have
      any signals for me?"  and if it does, collect up the info, and
      deliver them to the client (by building sigframes).

   And that's almost what we do.  The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work.  (Of which more later.)

   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.
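
   As a sketch of the polling idea in plain POSIX terms (the real code
   uses Valgrind's own wrappers, and on Darwin/AIX has to emulate
   sigtimedwait, as described below):

      // probe for a pending signal without waiting: a zero timeout
      // makes sigtimedwait return immediately, giving -1 with
      // errno == EAGAIN if nothing in 'set' is pending
      int poll_one_signal ( const sigset_t* set, siginfo_t* info )
      {
         struct timespec zero = { 0, 0 };
         return sigtimedwait(set, info, &zero);
      }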

   Now unfortunately .. the above scheme only works for so-called
   async signals.  An async signal is one which isn't associated with
   any particular instruction, eg Control-C (SIGINT).  For those, it
   doesn't matter if we don't deliver the signal to the client
   immediately; it only matters that we deliver it eventually.  Hence
   polling is OK.

   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE.  And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.

   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for
      async signals.

   (1) and (2) together imply that if the host does deliver a signal
      to async_signalhandler while the VCPU is running, something's
      seriously wrong.

   (3) when running code on vcpu, don't block sync signals.  Instead
      register sync_signalhandler and catch any such via that.  Of
      course, that means an ugly recovery path if we do -- the
      sync_signalhandler has to longjump, exiting out of the generated
      code, and the assembly-dispatcher thingy that runs it, and gets
      caught in m_scheduler, which then tells m_signals to deliver the
      signal.

   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.

   Obviously from the above, SKSS's signal mask (viz, what we really
   run with) is way different from SCSS's signal mask (viz, what the
   client thread thought it asked for).  (eg) It may well be that the
   client did not block control-C, so that it just expects to drop
   dead if it receives ^C whilst blocked in a syscall, but by default
   we are running with all async signals blocked, and so that signal
   could be arbitrarily delayed, or perhaps even lost (not sure).

   So what we have to do, when doing any syscall which SfMayBlock, is
   to quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition.  This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S.
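
   Schematically, that mask dance looks like this (sketch only: the
   real sequence has to be restartable at any instruction, which is
   exactly why it is written in assembly; do_the_syscall stands in
   for the actual syscall entry):

      VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, &saved);
      result = do_the_syscall(...);   // may block; client mask active
      VG_(sigprocmask)(VKI_SIG_SETMASK, &saved, NULL);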

   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever.  These are caught
                  by sync_signalhandler

    async signals:

       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_signalhandler, since we
             temporarily unblocked them for the duration of the
             syscall, by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a
             translation, washing our hair, etc).  As in the normal
             case, these signals are blocked, but we can and do poll
             for them using VG_(poll_signals).

   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero).  This is trivial on Linux, since it's just
   a syscall.  But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, longwinded and probably error-prone
   way.

   Finally, if a gdb is debugging the process under valgrind, the
   signal can be ignored if gdb tells us to ignore it.  So, before
   resuming the scheduler/delivering the signal, a call to
   VG_(gdbserver_report_signal) is made.  If this returns True, the
   signal is delivered.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debugger.h"      // For VG_(start_debugger)
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"


/* ---------------------------------------------------------------------
   Forwards decls.
   ------------------------------------------------------------------ */

static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

static const Char *signame(Int sigNo);

/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS        8

typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;

/* ------ Macros for pulling stuff out of ucontexts ------ */

/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do?  A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall.  (Or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.)  Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc.  Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */
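
/* A sketch of the intended use, assuming the caller has already
   established that the PC constraint above holds (sr_isError, sr_Res
   and sr_Err being the usual SysRes accessors):

      SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
      if (sr_isError(sres)) {
         // the syscall failed; sr_Err(sres) holds the error code
      } else {
         // the syscall succeeded; sr_Res(sres) holds its result
      }
*/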

#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);    \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;   \
      }

#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;             \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;             \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
      }

#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20.  The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9.  I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct?  And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext.  The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs  *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext.  In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used an altivec instruction.  If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it.  That bit
   will always be clear under 2.4.20.  So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */  \
      VG_(mk_SysRes_ppc32_linux)(                                   \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                        \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)          \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                     \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);   \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);    \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK]; \
      }

#elif defined(VGP_ppc64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3.  memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM.  Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
   {
      ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
      ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
      if (err) r3 &= 0xFF;
      return VG_(mk_SysRes_ppc64_linux)( r3, err );
   }
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                       \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];            \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];             \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK]; \
      }

#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)       \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;         \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;         \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
        (srP)->misc.ARM.r7  = (uc)->uc_mcontext.arm_r7; \
      }

#elif defined(VGP_ppc32_aix5)

   /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
#  include <ucontext.h>
   /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return jc->iar;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return jc->gpr[1];
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return VG_(mk_SysRes_ppc32_aix5)( jc->gpr[3], jc->gpr[4] );
   }
   static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct mstsave* jc = &mc->jmp_context;
      return jc->lr;
   }
   static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
      return VG_UCONTEXT_STACK_PTR(ucV);
   }

#elif defined(VGP_ppc64_aix5)

   /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
#  include <ucontext.h>
   /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return jc->iar;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return jc->gpr[1];
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return VG_(mk_SysRes_ppc64_aix5)( jc->gpr[3], jc->gpr[4] );
   }
   static inline Addr VG_UCONTEXT_LINK_REG( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __jmpbuf* mc = &(uc->uc_mcontext);
      struct __context64* jc = &mc->jmp_context;
      return jc->lr;
   }
   static inline Addr VG_UCONTEXT_FRAME_PTR( void* ucV ) {
      return VG_UCONTEXT_STACK_PTR(ucV);
   }

#elif defined(VGP_x86_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__eip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__esp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* this is complicated by the problem that there are 3 different
         kinds of syscalls, each with its own return convention.
         NB: scclass is a host word, hence UWord is good for both
         amd64-darwin and x86-darwin */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      UInt carry = 1 & ss->__eflags;
      UInt err = 0;
      UInt wLO = 0;
      UInt wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__eax;
            wHI = ss->__edx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__eax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__eax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                        wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)(ucV);
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__eip);
      srP->r_sp = (ULong)(ss->__esp);
      srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
   }

#elif defined(VGP_amd64_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      return uc->uc_mcontext->__ss.__rip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      return uc->uc_mcontext->__ss.__rsp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* As for x86-darwin above, but with the 64-bit registers. */
      struct __darwin_x86_thread_state64* ss
         = &((ucontext_t*)ucV)->uc_mcontext->__ss;
      ULong carry = 1 & ss->__rflags;
      ULong err = 0, wLO = 0, wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry; wLO = ss->__rax; wHI = ss->__rdx; break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__rax; break;
         default:
            vg_assert(0);
      }
      return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
                                          wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      struct __darwin_x86_thread_state64* ss
         = &((ucontext_t*)ucV)->uc_mcontext->__ss;
      srP->r_pc = ss->__rip;
      srP->r_sp = ss->__rsp;
      srP->misc.AMD64.r_rbp = ss->__rbp;
   }

#elif defined(VGO_l4re)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.ebp)
#  define VG_UCONTEXT_SYSCALL_NUM(uc)     ((uc)->uc_mcontext.eax)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_LINK_REG(uc)        0 /* Dude, where's my LR? */

#elif defined(VGP_s390x_linux)

#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.regs.psw.addr)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[15])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[11])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
#  define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.regs.gprs[14])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                 \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr);   \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]);   \
        (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11]; \
        (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14]; \
      }

#else
#  error Unknown platform
#endif


/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and AIX vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_aix5)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#elif defined(VGO_darwin)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#elif defined(VGO_l4re)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#else
#  error Unknown OS
#endif
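
/* With these in place, platform-independent code below can simply say
   e.g.

      void* fault_addr = info->VKI_SIGINFO_si_addr;

   regardless of how the underlying vki_siginfo_t is laid out. */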


/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */


/* Base-ment of these arrays[_VKI_NSIG].

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
 */


/* -----------------------------------------------------
   Static client signal state (SCSS).  This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings.  These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested.  Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;


/* -----------------------------------------------------
   Static kernel signal state (SKSS).  This is the state
   that we have the kernel in.  It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- pass through
     SA_RESTART -- we observe this but set our handlers to always restart
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
     SA_ONSTACK -- pass through
     SA_NOCLDWAIT -- pass through
*/

typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;

/* returns True if signal is to be ignored.
   To check this, possibly call gdbserver with tid. */
static Bool is_sig_ign(Int sigNo, ThreadId tid)
{
   vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);

   return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN
      || !VG_(gdbserver_report_signal) (sigNo, tid);
}

/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );
   }
}

/* This is the core, clever bit.  Computation is as follows:

   For each signal
      handler = if client has a handler, then our handler
                else if client is DFL, then our handler as well
                else (client must be IGN)
                then handler is IGN
*/
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      switch(sig) {
      case VKI_SIGSEGV:
      case VKI_SIGBUS:
      case VKI_SIGFPE:
      case VKI_SIGILL:
      case VKI_SIGTRAP:
         /* For these, we always want to catch them and report, even
            if the client code doesn't. */
         skss_handler = sync_signalhandler;
         break;

      case VKI_SIGCONT:
         /* Let the kernel handle SIGCONT unless the client is actually
            catching it. */
      case VKI_SIGCHLD:
      case VKI_SIGWINCH:
      case VKI_SIGURG:
         /* For signals which have a default action of Ignore,
            only set a handler if the client has set a signal handler.
            Otherwise the kernel will interrupt a syscall which
            wouldn't have otherwise been interrupted. */
         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
            skss_handler = VKI_SIG_DFL;
         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
            skss_handler = VKI_SIG_IGN;
         else
            skss_handler = async_signalhandler;
         break;

      default:
         // VKI_SIGVG* are runtime variables, so we can't make them
         // cases in the switch, so we handle them in the 'default' case.
         if (sig == VG_SIGVGKILL)
            skss_handler = sigvgkill_handler;
         else {
            if (scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
         }
         break;
      }

      /* Flags */

      skss_flags = 0;

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}
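
/* For example (illustrative, just reading off the rules above):

      client SIGSEGV = SIG_IGN  ->  kernel handler = sync_signalhandler
      client SIGINT  = SIG_IGN  ->  kernel handler = SIG_IGN
      client SIGCHLD = SIG_DFL  ->  kernel handler = SIG_DFL
      client SIGUSR1 = handler  ->  kernel handler = async_signalhandler

   i.e. sync signals are always caught by us, default-Ignore signals
   stay with the kernel unless the client installs a handler, and
   everything else funnels through async_signalhandler. */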

/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
extern void my_sigreturn(void);
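
// Why two levels?  With one level, '#name' would stringify the literal
// token "__NR_rt_sigreturn" instead of its numeric value.  A minimal
// standalone illustration (not part of this file's logic):
//
//    #define STR1(x)  #x
//    #define STR2(x)  STR1(x)
//    #define FOO      42
//    // STR1(FOO) expands to "FOO", but STR2(FOO) expands to "42",
//    // because the extra level lets FOO be macro-expanded first.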

#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "	movl	$" #name ", %eax\n" \
   "	int	$0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "	movq	$" #name ", %rax\n" \
   "	syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "	li	0, " #name "\n" \
   "	sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64_linux)
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align   3\n" \
   "my_sigreturn:\n" \
   ".quad    .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type    .my_sigreturn,@function\n" \
   ".globl   .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "	li	0, " #name "\n" \
   "	sc\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n\t" \
   "    mov  r7, #" #name "\n\t" \
   "    svc  0x00000000\n" \
   ".previous\n"

#elif defined(VGP_ppc32_aix5)
#  define _MY_SIGRETURN(name) \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n"

#elif defined(VGP_ppc64_aix5)
#  define _MY_SIGRETURN(name) \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "int $0x80"

#elif defined(VGP_amd64_darwin)
   // DDD: todo
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGO_l4re)
#  define _MY_SIGRETURN(name) \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGP_s390x_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   " svc " #name "\n" \
   ".previous\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);


static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL is pointless; just ignore
         it. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update) {
         if ((skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
             && (skss_old.skss_per_sig[sig].skss_flags
                 == skss.skss_per_sig[sig].skss_flags))
            /* no difference */
            continue;
      }

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
         !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc.  Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
            !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
            !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
         vg_assert(ksa_old.sa_restorer == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}


/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack.  */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}


SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %d, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                ss ? ss->ss_sp : 0,
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}


SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions.  The aim is to ensure that if
      when the call is passed to the kernel it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
        && new_act
        && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entry into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags   = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask    = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                signame(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                signame(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                signame(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                signame(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}


static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}

static
HChar* format_sigset ( const vki_sigset_t* set )
{
   static HChar buf[128];
   int w;

   VG_(strcpy)(buf, "");

   for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
   {
#     if _VKI_NSIG_BPW == 32
      VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
                   set ? (ULong)set->sig[w] : 0);
#     elif _VKI_NSIG_BPW == 64
      VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
                   set ? (ULong)set->sig[w] : 0);
#     else
#       error "Unsupported value for _VKI_NSIG_BPW"
#     endif
   }

   return buf;
}

/*
   This updates the thread's signal mask.  There's no such thing as a
   process-wide signal mask.

   Note that the thread signal masks are an implicit part of SCSS,
   which is why this routine is allowed to mess with them.
*/
static
void do_setmask ( ThreadId tid,
                  Int how,
                  vki_sigset_t* newset,
                  vki_sigset_t* oldset )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
                tid, how,
                how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
                   how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
                      how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
                newset, newset ? format_sigset(newset) : "NULL" );

   /* Just do this thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   if (oldset) {
      *oldset = VG_(threads)[tid].sig_mask;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   }
   if (newset) {
      do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
      VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   }
}


SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
                                 Int how,
                                 vki_sigset_t* set,
                                 vki_sigset_t* oldset )
{
   switch(how) {
      case VKI_SIG_BLOCK:
      case VKI_SIG_UNBLOCK:
      case VKI_SIG_SETMASK:
         vg_assert(VG_(is_valid_tid)(tid));
         do_setmask ( tid, how, set, oldset );
         return VG_(mk_SysRes_Success)( 0 );

      default:
         VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
}


/* ---------------------------------------------------------------------
   LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   Int           ret;
   vki_sigset_t  block_procmask;
   VG_(sigfillset)(&block_procmask);
   ret = VG_(sigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
#endif
}

/* Restore the blocking mask using the supplied saved one. */
static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
{
   Int ret;
   ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}

void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   block_all_host_signals(saved_mask);
   if (VG_(threads)[tid].sig_queue != NULL) {
      VG_(arena_free)(VG_AR_CORE, VG_(threads)[tid].sig_queue);
      VG_(threads)[tid].sig_queue = NULL;
   }
   restore_all_host_signals(saved_mask);
#endif
}

/* ---------------------------------------------------------------------
   The signal simulation proper.  A simplified version of what the
   Linux kernel does.
   ------------------------------------------------------------------ */

/* Set up a stack frame (VgSigContext) for the client's signal
   handler. */
static
void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                                       const struct vki_ucontext *uc )
{
#if defined(VGO_l4re)
   VG_(unimplemented)((char *)__func__);
#else
   Addr         esp_top_of_frame;
   ThreadState* tst;
   Int          sigNo = siginfo->si_signo;

   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
      VG_(get_and_pp_StackTrace)(tid, 10);
   }

   if (/* this signal asked to run on an alt stack */
       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
       && /* there is a defined and enabled alt stack, which we're not
             already using.  Logic from get_sigframe in
             arch/i386/kernel/signal.c. */
          sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
      ) {
      esp_top_of_frame
         = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("delivering signal %d (%s) to thread %d: "
                   "on ALT STACK (%p-%p; %ld bytes)\n",
                   sigNo, signame(sigNo), tid, tst->altstack.ss_sp,
                   (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                   (Word)tst->altstack.ss_size );

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );

   } else {
      esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
   }

   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);

   /* This may fail if the client stack is busted; if that happens,
      the whole process will exit rather than simply calling the
      signal handler. */
   VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
                         scss.scss_per_sig[sigNo].scss_handler,
                         scss.scss_per_sig[sigNo].scss_flags,
                         &tst->sig_mask,
                         scss.scss_per_sig[sigNo].scss_restorer);
#endif
}


static const Char *signame(Int sigNo)
{
   static Char buf[20];

   switch(sigNo) {
      case VKI_SIGHUP:    return "SIGHUP";
      case VKI_SIGINT:    return "SIGINT";
      case VKI_SIGQUIT:   return "SIGQUIT";
      case VKI_SIGILL:    return "SIGILL";
      case VKI_SIGTRAP:   return "SIGTRAP";
      case VKI_SIGABRT:   return "SIGABRT";
      case VKI_SIGBUS:    return "SIGBUS";
      case VKI_SIGFPE:    return "SIGFPE";
      case VKI_SIGKILL:   return "SIGKILL";
      case VKI_SIGUSR1:   return "SIGUSR1";
      case VKI_SIGUSR2:   return "SIGUSR2";
      case VKI_SIGSEGV:   return "SIGSEGV";
      case VKI_SIGPIPE:   return "SIGPIPE";
      case VKI_SIGALRM:   return "SIGALRM";
      case VKI_SIGTERM:   return "SIGTERM";
#     if defined(VKI_SIGSTKFLT)
      case VKI_SIGSTKFLT: return "SIGSTKFLT";
#     endif
      case VKI_SIGCHLD:   return "SIGCHLD";
      case VKI_SIGCONT:   return "SIGCONT";
      case VKI_SIGSTOP:   return "SIGSTOP";
      case VKI_SIGTSTP:   return "SIGTSTP";
      case VKI_SIGTTIN:   return "SIGTTIN";
      case VKI_SIGTTOU:   return "SIGTTOU";
      case VKI_SIGURG:    return "SIGURG";
      case VKI_SIGXCPU:   return "SIGXCPU";
      case VKI_SIGXFSZ:   return "SIGXFSZ";
      case VKI_SIGVTALRM: return "SIGVTALRM";
      case VKI_SIGPROF:   return "SIGPROF";
      case VKI_SIGWINCH:  return "SIGWINCH";
      case VKI_SIGIO:     return "SIGIO";
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    return "SIGPWR";
#     endif
#     if defined(VKI_SIGUNUSED)
      case VKI_SIGUNUSED: return "SIGUNUSED";
#     endif

#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX:
         VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
         return buf;
#     endif

      default:
         VG_(sprintf)(buf, "SIG%d", sigNo);
         return buf;
   }
}

/* Hit ourselves with a signal using the default handler */
void VG_(kill_self)(Int sigNo)
{
   Int r;
   vki_sigset_t          mask, origmask;
   vki_sigaction_toK_t   sa, origsa2;
   vki_sigaction_fromK_t origsa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
      !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(sigaction)(sigNo, &sa, &origsa);

   VG_(sigemptyset)(&mask);
   VG_(sigaddset)(&mask, sigNo);
   VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);

   r = VG_(kill)(VG_(getpid)(), sigNo);
#  if defined(VGO_linux)
   /* This sometimes fails with EPERM on Darwin.  I don't know why. */
   vg_assert(r == 0);
#  endif

   VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   VG_(sigaction)(sigNo, &origsa2, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
}

// The si_code describes where the signal came from.  Some come from the
// kernel, eg.: seg faults, illegal opcodes.  Some come from the user, eg.:
// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
// request (SI_ASYNCIO).  There's lots of implementation-defined leeway in
// POSIX, but the user vs. kernel distinction is what we want here.  We also
// pass in some other details that can help when si_code is unreliable.
static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
{
#if defined(VGO_linux) || defined(VGO_aix5)
   // On Linux, SI_USER is zero, negative values are from the user, positive
   // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
   // macros but we don't use them here because other platforms don't have
   // them.
   return ( si_code > VKI_SI_USER ? True : False );

#elif defined(VGO_darwin)
   // On Darwin 9.6.0, the si_code is completely unreliable.  It should be the
   // case that 0 means "user", and >0 means "kernel".  But:
   // - For SIGSEGV, it seems quite reliable.
   // - For SIGBUS, it's always 2.
   // - For SIGFPE, it's often 0, even for kernel ones (eg.
   //   div-by-integer-zero always gives zero).
   // - For SIGILL, it's unclear.
   // - For SIGTRAP, it's always 1.
   // You can see the "NOTIMP" (not implemented) status of a number of the
   // sub-cases in sys/signal.h.  Hopefully future versions of Darwin will
   // get this right.

   // If we're blocked waiting on a syscall, it must be a user signal, because
   // the kernel won't generate sync signals within syscalls.
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      return False;

   // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
   } else if (SIGSEGV == signum) {
      return ( si_code > 0 ? True : False );

   // If it's anything else, assume it's kernel-generated.  Reason being that
   // kernel-generated sync signals are more common, and it's probable that
   // misdiagnosing a user signal as a kernel signal is better than the
   // opposite.
   } else {
      return True;
   }
#elif defined(VGO_l4re)
   return False;
#else
#  error Unknown OS
#endif
}

// This is an arbitrary si_code that we only use internally.  It corresponds
// to the value SI_KERNEL on Linux, but that's not really of any significance
// as far as I can determine.
#define VKI_SEGV_MADE_UP_GPF    0x80

/*
   Perform the default action of a signal.  If the signal is fatal, it
   marks all threads as needing to exit, but it doesn't actually kill
   the process or thread.

   If we're not being quiet, then print out some more detail about
   fatal signals (esp. core dumping signals).
*/
static void default_action(const vki_siginfo_t *info, ThreadId tid)
{
   Int  sigNo     = info->si_signo;
   Bool terminate = False;      /* kills process         */
   Bool core      = False;      /* kills process w/ core */
   struct vki_rlimit corelim;
   Bool could_core;

   vg_assert(VG_(is_running_thread)(tid));

   switch(sigNo) {
      case VKI_SIGQUIT:  /* core */
      case VKI_SIGILL:   /* core */
      case VKI_SIGABRT:  /* core */
      case VKI_SIGFPE:   /* core */
      case VKI_SIGSEGV:  /* core */
      case VKI_SIGBUS:   /* core */
      case VKI_SIGTRAP:  /* core */
      case VKI_SIGXCPU:  /* core */
      case VKI_SIGXFSZ:  /* core */
         terminate = True;
         core = True;
         break;

      case VKI_SIGHUP:   /* term */
      case VKI_SIGINT:   /* term */
      case VKI_SIGKILL:  /* term - we won't see this */
      case VKI_SIGPIPE:  /* term */
      case VKI_SIGALRM:  /* term */
      case VKI_SIGTERM:  /* term */
      case VKI_SIGUSR1:  /* term */
      case VKI_SIGUSR2:  /* term */
      case VKI_SIGIO:    /* term */
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:   /* term */
#     endif
      case VKI_SIGSYS:   /* term */
      case VKI_SIGPROF:  /* term */
      case VKI_SIGVTALRM: /* term */
#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
#     endif
         terminate = True;
         break;
   }

   vg_assert(!core || (core && terminate));

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
                sigNo, info->si_code, terminate ? "terminate" : "ignore",
                core ? "+core" : "");

   if (!terminate)
      return;                   /* nothing to do */

   could_core = core;

   if (core) {
      /* If they set the core-size limit to zero, don't generate a
         core file */

      VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);

      if (corelim.rlim_cur == 0)
         core = False;
   }

   if ( (VG_(clo_verbosity) > 1 ||
         (could_core && is_signal_from_kernel(tid, sigNo, info->si_code))
        ) &&
        !VG_(clo_xml) ) {
      VG_(umsg)(
         "\n"
         "Process terminating with default action of signal %d (%s)%s\n",
         sigNo, signame(sigNo), core ? ": dumping core" : "");

      /* Be helpful - decode some more details about this fault */
      if (is_signal_from_kernel(tid, sigNo, info->si_code)) {
         const Char *event = NULL;
         Bool haveaddr = True;

         switch(sigNo) {
         case VKI_SIGSEGV:
            switch(info->si_code) {
            case VKI_SEGV_MAPERR: event = "Access not within mapped region";
                                  break;
            case VKI_SEGV_ACCERR: event = "Bad permissions for mapped region";
                                  break;
            case VKI_SEGV_MADE_UP_GPF:
               /* General Protection Fault: The CPU/kernel
                  isn't telling us anything useful, but this
                  is commonly the result of exceeding a
                  segment limit. */
               event = "General Protection Fault";
               haveaddr = False;
               break;
            }
#if 0
            {
               HChar buf[110];
               VG_(am_show_nsegments)(0,"post segfault");
               VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
               VG_(system)(buf);
            }
#endif
            break;

         case VKI_SIGILL:
            switch(info->si_code) {
            case VKI_ILL_ILLOPC: event = "Illegal opcode"; break;
            case VKI_ILL_ILLOPN: event = "Illegal operand"; break;
            case VKI_ILL_ILLADR: event = "Illegal addressing mode"; break;
            case VKI_ILL_ILLTRP: event = "Illegal trap"; break;
            case VKI_ILL_PRVOPC: event = "Privileged opcode"; break;
            case VKI_ILL_PRVREG: event = "Privileged register"; break;
            case VKI_ILL_COPROC: event = "Coprocessor error"; break;
            case VKI_ILL_BADSTK: event = "Internal stack error"; break;
            }
            break;

         case VKI_SIGFPE:
            switch (info->si_code) {
            case VKI_FPE_INTDIV: event = "Integer divide by zero"; break;
            case VKI_FPE_INTOVF: event = "Integer overflow"; break;
            case VKI_FPE_FLTDIV: event = "FP divide by zero"; break;
            case VKI_FPE_FLTOVF: event = "FP overflow"; break;
            case VKI_FPE_FLTUND: event = "FP underflow"; break;
            case VKI_FPE_FLTRES: event = "FP inexact"; break;
            case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
            case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
            }
            break;

         case VKI_SIGBUS:
            switch (info->si_code) {
            case VKI_BUS_ADRALN: event = "Invalid address alignment"; break;
            case VKI_BUS_ADRERR: event = "Non-existent physical address"; break;
            case VKI_BUS_OBJERR: event = "Hardware error"; break;
            }
            break;
         } /* switch (sigNo) */

         if (event != NULL) {
            if (haveaddr)
               VG_(umsg)(" %s at address %p\n",
                         event, info->VKI_SIGINFO_si_addr);
            else
               VG_(umsg)(" %s\n", event);
         }
      }
      /* Print a stack trace.  Be cautious if the thread's SP is in an
         obviously stupid place (not mapped readable) that would
         likely cause a segfault. */
      if (VG_(is_valid_tid)(tid)) {
         ExeContext* ec = VG_(am_is_valid_for_client)
                             (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
                        ? VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ )
                        : VG_(record_depth_1_ExeContext)( tid );
         vg_assert(ec);
         VG_(pp_ExeContext)( ec );
      }
      if (sigNo == VKI_SIGSEGV
          && info && is_signal_from_kernel(tid, sigNo, info->si_code)
          && info->si_code == VKI_SEGV_MAPERR) {
         VG_(umsg)(" If you believe this happened as a result of a stack\n" );
         VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
         VG_(umsg)(" possible), you can try to increase the size of the\n"  );
         VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
         // FIXME: assumes main ThreadId == 1
         if (VG_(is_valid_tid)(1)) {
            VG_(umsg)(
               " The main thread stack size used in this run was %d.\n",
               (Int)VG_(threads)[1].client_stack_szB);
         }
      }
   }

   if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
      VG_(start_debugger)( tid );
   }

   if (core) {
      const static struct vki_rlimit zero = { 0, 0 };

      VG_(make_coredump)(tid, info, corelim.rlim_cur);

      /* Make sure we don't get a confusing kernel-generated
         coredump when we finally exit */
      VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
   }

   /* stash fatal signal in main thread */
   //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;

   /* everyone dies */
   VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
   VG_(threads)[tid].exitreason = VgSrc_FatalSig;
   VG_(threads)[tid].os_state.fatalsig = sigNo;
}
1735 This does the business of delivering a signal to a thread. It may
1736 be called from either a real signal handler, or from normal code to
1737 cause the thread to enter the signal handler.
1739 This updates the thread state, but it does not set it to be
1742 static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
1743 const struct vki_ucontext *uc )
1745 #if defined(VGO_l4re)
1746 VG_(unimplemented)((char *)__func__);
1748 Int sigNo = info->si_signo;
1749 SCSS_Per_Signal *handler = &scss.scss_per_sig[sigNo];
1751 ThreadState *tst = VG_(get_ThreadState)(tid);
1753 if (VG_(clo_trace_signals))
1754 VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
1755 sigNo, signame(sigNo), info->si_code, tid );
1757 if (sigNo == VG_SIGVGKILL) {
1758 /* If this is a SIGVGKILL, we're expecting it to interrupt any
1759 blocked syscall. It doesn't matter whether the VCPU state is
1760 set to restart or not, because we don't expect it will
1761 execute any more client instructions. */
1762 vg_assert(VG_(is_exiting)(tid));
1766 /* If the client specifies SIG_IGN, treat it as SIG_DFL.
1768 If deliver_signal() is being called on a thread, we want
1769 the signal to get through no matter what; if they're ignoring
1770 it, then we do this override (this is so we can send it SIGSEGV,
1772 handler_fn = handler->scss_handler;
1773 if (handler_fn == VKI_SIG_IGN)
1774 handler_fn = VKI_SIG_DFL;
1776 vg_assert(handler_fn != VKI_SIG_IGN);
1778 if (handler_fn == VKI_SIG_DFL) {
1779 default_action(info, tid);
1781 /* Create a signal delivery frame, and set the client's %ESP and
1782 %EIP so that when execution continues, we will enter the
1783 signal handler with the frame on top of the client's stack,
1786 Signal delivery can fail if the client stack is too small or
1787 missing, and we can't push the frame. If that happens,
1788 push_signal_frame will cause the whole process to exit when
1789 we next hit the scheduler.
1791 vg_assert(VG_(is_valid_tid)(tid));
1793 push_signal_frame ( tid, info, uc );
1795 if (handler->scss_flags & VKI_SA_ONESHOT) {
1796 /* Do the ONESHOT thing. */
1797 handler->scss_handler = VKI_SIG_DFL;
1799 handle_SCSS_change( False /* lazy update */ );
1803 tst->sig_mask is the current signal mask
1804 tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
1805 handler->scss_mask is the mask set by the handler
1807 Handler gets a mask of tmp_sig_mask|handler_mask|signo
1809 tst->sig_mask = tst->tmp_sig_mask;
1810 if (!(handler->scss_flags & VKI_SA_NOMASK)) {
1811 VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
1812 VG_(sigaddset)(&tst->sig_mask, sigNo);
1813 tst->tmp_sig_mask = tst->sig_mask;
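/* Worked example (illustrative only): if tmp_sig_mask is {SIGUSR1},
   the handler was installed with an sa_mask of {SIGUSR2}, and sigNo
   is SIGSEGV, then (absent SA_NOMASK) the handler runs with
   {SIGUSR1, SIGUSR2, SIGSEGV} blocked -- the same mask the kernel
   itself would compute for a handler installed with sigaction. */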
1818 /* Thread state is ready to go - just add Runnable */
1821 static void resume_scheduler(ThreadId tid)
1823 ThreadState *tst = VG_(get_ThreadState)(tid);
1825 vg_assert(tst->os_state.lwpid == VG_(gettid)());
1827 if (tst->sched_jmpbuf_valid) {
1828 /* Can't continue; must longjmp back to the scheduler and thus
1829 enter the sighandler immediately. */
1830 VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
1834 static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
1836 #if defined(VGO_l4re)
1837 VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
VG_(message)(Vg_DebugMsg, "%s(tid=%d, addr=%p, si_code=%d)\n", __func__, tid, (void*)addr, si_code);
1840 enter_kdebug("synth_fault_common");
1845 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1847 VG_(memset)(&info, 0, sizeof(info));
1848 info.si_signo = VKI_SIGSEGV;
1849 info.si_code = si_code;
1850 info.VKI_SIGINFO_si_addr = (void*)addr;
1852 /* even if gdbserver indicates to ignore the signal, we will deliver it */
1853 VG_(gdbserver_report_signal) (VKI_SIGSEGV, tid);
1855 /* If they're trying to block the signal, force it to be delivered */
1856 if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
1857 VG_(set_default_handler)(VKI_SIGSEGV);
1859 deliver_signal(tid, &info, NULL);
1863 // Synthesize a fault where the address is OK, but the page
1864 // permissions are bad.
1865 void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
1867 synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
// Synthesize a fault where there's nothing mapped at the address.
1871 void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
1873 synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
1876 // Synthesize a misc memory fault.
1877 void VG_(synth_fault)(ThreadId tid)
1879 synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
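/* Hypothetical usage sketch (the helper name below is invented):
   core code which notices the guest touching an unmapped address
   could report it to the client as a mapping fault:

      if (!addr_is_mapped(addr))               // invented helper
         VG_(synth_fault_mapping)(tid, addr);  // deliver SEGV_MAPERR

   deliver_signal() then routes the synthesised SIGSEGV through the
   client's registered handler, or applies the default action. */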
1882 // Synthesise a SIGILL.
1883 void VG_(synth_sigill)(ThreadId tid, Addr addr)
1887 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1889 VG_(memset)(&info, 0, sizeof(info));
1890 info.si_signo = VKI_SIGILL;
1891 info.si_code = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
1892 info.VKI_SIGINFO_si_addr = (void*)addr;
1894 if (VG_(gdbserver_report_signal) (VKI_SIGILL, tid)) {
1895 resume_scheduler(tid);
1896 deliver_signal(tid, &info, NULL);
1899 resume_scheduler(tid);
1902 // Synthesise a SIGBUS.
1903 void VG_(synth_sigbus)(ThreadId tid)
1907 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1909 VG_(memset)(&info, 0, sizeof(info));
1910 info.si_signo = VKI_SIGBUS;
1911 /* There are several meanings to SIGBUS (as per POSIX, presumably),
1912 but the most widely understood is "invalid address alignment",
1913 so let's use that. */
1914 info.si_code = VKI_BUS_ADRALN;
1915 /* If we knew the invalid address in question, we could put it
1916 in .si_addr. Oh well. */
1917 /* info.VKI_SIGINFO_si_addr = (void*)addr; */
1919 if (VG_(gdbserver_report_signal) (VKI_SIGBUS, tid)) {
1920 resume_scheduler(tid);
1921 deliver_signal(tid, &info, NULL);
1924 resume_scheduler(tid);
1927 // Synthesise a SIGTRAP.
1928 void VG_(synth_sigtrap)(ThreadId tid)
1931 struct vki_ucontext uc;
1932 # if defined(VGP_x86_darwin)
1933 struct __darwin_mcontext32 mc;
1934 # elif defined(VGP_amd64_darwin)
1935 struct __darwin_mcontext64 mc;
1938 vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
1940 VG_(memset)(&info, 0, sizeof(info));
1941 VG_(memset)(&uc, 0, sizeof(uc));
1942 info.si_signo = VKI_SIGTRAP;
1943 info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */
1945 # if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
1946 uc.uc_mcontext.trapno = 3; /* tjh: this is the x86 trap number
1947 for a breakpoint trap... */
1948 uc.uc_mcontext.err = 0; /* tjh: no error code for x86
1949 breakpoint trap... */
1950 # elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
1951 /* the same thing, but using Darwin field/struct names */
1952 VG_(memset)(&mc, 0, sizeof(mc));
1953 uc.uc_mcontext = &mc;
1954 uc.uc_mcontext->__es.__trapno = 3;
1955 uc.uc_mcontext->__es.__err = 0;
1958 /* fixs390: do we need to do anything here for s390 ? */
1959 if (VG_(gdbserver_report_signal) (VKI_SIGTRAP, tid)) {
1960 resume_scheduler(tid);
1961 deliver_signal(tid, &info, &uc);
1964 resume_scheduler(tid);
1967 /* Make a signal pending for a thread, for later delivery.
1968 VG_(poll_signals) will arrange for it to be delivered at the right
tid==0 means add it to the process-wide queue, and not send it to a
1975 void queue_signal(ThreadId tid, const vki_siginfo_t *si)
1979 vki_sigset_t savedmask;
1981 tst = VG_(get_ThreadState)(tid);
1983 /* Protect the signal queue against async deliveries */
1984 block_all_host_signals(&savedmask);
1986 if (tst->sig_queue == NULL) {
1987 tst->sig_queue = VG_(arena_malloc)(VG_AR_CORE, "signals.qs.1",
1988 sizeof(*tst->sig_queue));
1989 VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
1991 sq = tst->sig_queue;
1993 if (VG_(clo_trace_signals))
1994 VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
1995 si->si_signo, sq->next, tid);
1997 /* Add signal to the queue. If the queue gets overrun, then old
1998 queued signals may get lost.
2000 XXX We should also keep a sigset of pending signals, so that at
least a non-siginfo signal gets delivered.
2003 if (sq->sigs[sq->next].si_signo != 0)
2004 VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
2005 sq->sigs[sq->next].si_signo, tid);
2007 sq->sigs[sq->next] = *si;
2008 sq->next = (sq->next+1) % N_QUEUED_SIGNALS;
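/* Illustrative sketch of the ring buffer (not real code): with, say,
   N_QUEUED_SIGNALS == 8 and sq->next == 7, the two lines above
   behave as:

      sq->sigs[7] = *si;          // store into the current slot
      sq->next    = (7 + 1) % 8;  // wraps back around to 0

   so a ninth signal overwrites the first, still-undelivered one;
   that is the case the "being dropped" message above reports. */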
2010 restore_all_host_signals(&savedmask);
2014 Returns the next queued signal for thread tid which is in "set".
tid==0 means the process-wide queue.  The caller should set si_signo
to 0 once the signal has been delivered.
2018 Must be called with all signals blocked, to protect against async
2021 static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
2023 ThreadState *tst = VG_(get_ThreadState)(tid);
2026 vki_siginfo_t *ret = NULL;
2028 sq = tst->sig_queue;
2035 VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
2036 sq->sigs[idx].si_signo,
2037 VG_(sigismember)(set, sq->sigs[idx].si_signo));
2039 if (sq->sigs[idx].si_signo != 0
2040 && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
2041 if (VG_(clo_trace_signals))
2042 VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
2043 sq->sigs[idx].si_signo, idx, tid);
2044 ret = &sq->sigs[idx];
2048 idx = (idx + 1) % N_QUEUED_SIGNALS;
2049 } while(idx != sq->next);
2054 static int sanitize_si_code(int si_code)
2056 #if defined(VGO_linux) || defined(VGO_l4re)
/* The Linux kernel uses the top 16 bits of si_code for its own
use and only exports the bottom 16 bits to user space - at least
that is the theory, but it turns out that there are some kernels
around that forget to mask out the top 16 bits, so we do it here.
2062 The kernel treats the bottom 16 bits as signed and (when it does
2063 mask them off) sign extends them when exporting to user space so
2064 we do the same thing here. */
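/* Worked example (illustrative): a buggy kernel might hand us
   si_code 0x00010006; truncating to Short keeps 0x0006 == 6, while
   0x0001fff2 becomes (Short)0xfff2, which sign-extends back to -14
   when widened to int. */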
2065 return (Short)si_code;
2066 #elif defined(VGO_aix5) || defined(VGO_darwin)
2074 Receive an async signal from the kernel.
2076 This should only happen when the thread is blocked in a syscall,
2077 since that's the only time this set of signals is unblocked.
2080 void async_signalhandler ( Int sigNo,
2081 vki_siginfo_t *info, struct vki_ucontext *uc )
2083 #if !defined(VGO_l4re)
2084 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2085 ThreadState* tst = VG_(get_ThreadState)(tid);
2088 /* The thread isn't currently running, make it so before going on */
2089 vg_assert(tst->status == VgTs_WaitSys);
2090 VG_(acquire_BigLock)(tid, "async_signalhandler");
2092 info->si_code = sanitize_si_code(info->si_code);
2094 if (VG_(clo_trace_signals))
2095 VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
2096 sigNo, tid, info->si_code);
2098 /* Update thread state properly. The signal can only have been
2099 delivered whilst we were in
2100 coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
2101 window between the two sigprocmask calls, since at all other
2102 times, we run with async signals on the host blocked. Hence
2103 make enquiries on the basis that we were in or very close to a
2104 syscall, and attempt to fix up the guest state accordingly.
2106 (normal async signals occurring during computation are blocked,
2107 but periodically polled for using VG_(sigtimedwait_zero), and
2108 delivered at a point convenient for us. Hence this routine only
2109 deals with signals that are delivered to a thread during a
2112 /* First, extract a SysRes from the ucontext_t* given to this
2113 handler. If it is subsequently established by
2114 VG_(fixup_guest_state_after_syscall_interrupted) that the
2115 syscall was complete but the results had not been committed yet
2116 to the guest state, then it'll have to commit the results itself
2117 "by hand", and so we need to extract the SysRes. Of course if
2118 the thread was not in that particular window then the
2119 SysRes will be meaningless, but that's OK too because
2120 VG_(fixup_guest_state_after_syscall_interrupted) will detect
2121 that the thread was not in said window and ignore the SysRes. */
2123 /* To make matters more complex still, on Darwin we need to know
2124 the "class" of the syscall under consideration in order to be
able to extract a correct SysRes.  The class will have been
2126 saved just before the syscall, by VG_(client_syscall), into this
2127 thread's tst->arch.vex.guest_SC_CLASS. Hence: */
2128 # if defined(VGO_darwin)
2129 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
2131 sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
2135 VG_(fixup_guest_state_after_syscall_interrupted)(
2137 VG_UCONTEXT_INSTR_PTR(uc),
2139 !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
2143 /* Set up the thread's state to deliver a signal */
2144 if (!is_sig_ign(info->si_signo, tid))
2145 deliver_signal(tid, info, uc);
2147 /* It's crucial that (1) and (2) happen in the order (1) then (2)
2148 and not the other way around. (1) fixes up the guest thread
2149 state to reflect the fact that the syscall was interrupted --
2150 either to restart the syscall or to return EINTR. (2) then sets
2151 up the thread state to deliver the signal. Then we resume
2152 execution. First, the signal handler is run, since that's the
2153 second adjustment we made to the thread state. If that returns,
2154 then we resume at the guest state created by (1), viz, either
2155 the syscall returns EINTR or is restarted.
2157 If (2) was done before (1) the outcome would be completely
2158 different, and wrong. */
2160 /* longjmp back to the thread's main loop to start executing the
2162 resume_scheduler(tid);
2164 VG_(core_panic)("async_signalhandler: got unexpected signal "
2165 "while outside of scheduler");
2167 VG_(unimplemented)("unimplemented function async_signalhandler()");
2171 /* Extend the stack to cover addr. maxsize is the limit the stack can grow to.
2173 Returns True on success, False on failure.
2175 Succeeds without doing anything if addr is already within a segment.
2177 Failure could be caused by:
2178 - addr not below a growable segment
2179 - new stack size would exceed maxsize
2180 - mmap failed for some other reason
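For example, the SIGSEGV path below (extend_stack_if_appropriate)
invokes this as, roughly:

   Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
   VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB);
*/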
2182 Bool VG_(extend_stack)(Addr addr, UInt maxsize)
2186 /* Find the next Segment above addr */
2188 = VG_(am_find_nsegment)(addr);
2189 NSegment const* seg_next
2190 = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2193 if (seg && seg->kind == SkAnonC)
2194 /* addr is already mapped. Nothing to do. */
2197 /* Check that the requested new base is in a shrink-down
2198 reservation section which abuts an anonymous mapping that
2199 belongs to the client. */
2201 && seg->kind == SkResvn
2202 && seg->smode == SmUpper
2204 && seg_next->kind == SkAnonC
2205 && seg->end+1 == seg_next->start))
2208 udelta = VG_PGROUNDUP(seg_next->start - addr);
2209 VG_(debugLog)(1, "signals",
2210 "extending a stack base 0x%llx down by %lld\n",
2211 (ULong)seg_next->start, (ULong)udelta);
2212 if (! VG_(am_extend_into_adjacent_reservation_client)
2213 ( (NSegment*)seg_next, -(SSizeT)udelta )) {
2214 VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
2218 /* When we change the main stack, we have to let the stack handling
2219 code know about it. */
2220 VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));
2222 if (VG_(clo_sanity_level) > 2)
2223 VG_(sanity_check_general)(False);
2228 static void (*fault_catcher)(Int sig, Addr addr) = NULL;
2230 void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
2233 VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
2234 vg_assert2(NULL == catcher || NULL == fault_catcher,
2235 "Fault catcher is already registered");
2237 fault_catcher = catcher;
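/* Hypothetical usage sketch (names invented): a tool probing memory
   that may be unmapped brackets the probe with a catcher, and
   deregisters it by passing NULL afterwards:

      static void probe_catcher ( Int sig, Addr addr )
      {
         // typically siglongjmps back to the probing loop
      }

      VG_(set_fault_catcher)(probe_catcher);
      ... scan potentially-unmapped memory ...
      VG_(set_fault_catcher)(NULL);
*/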
2241 void sync_signalhandler_from_user ( ThreadId tid,
2242 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
/* If some user process sent us a sync signal (i.e. it's not the
result of a faulting instruction), then how we treat it depends on
when it
2250 if (VG_(threads)[tid].status == VgTs_WaitSys) {
/* Signal arrived while we're blocked in a syscall.  This means that
the client's signal mask was applied; in other words, we can't get
here unless the client wants this signal right now, and so we can
simply use the async_signalhandler. */
2255 if (VG_(clo_trace_signals))
2256 VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
2259 async_signalhandler(sigNo, info, uc);
2260 VG_(core_panic)("async_signalhandler returned!?\n");
2263 /* Signal arrived while in generated client code, or while running
2264 Valgrind core code. That means that every thread has these signals
2265 unblocked, so we can't rely on the kernel to route them properly, so
2266 we need to queue them manually. */
2267 if (VG_(clo_trace_signals))
2268 VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);
2270 # if defined(VGO_linux)
2271 /* On Linux, first we have to do a sanity check of the siginfo. */
2272 if (info->VKI_SIGINFO_si_pid == 0) {
2273 /* There's a per-user limit of pending siginfo signals. If
2274 you exceed this, by having more than that number of
2275 pending signals with siginfo, then new signals are
2276 delivered without siginfo. This condition can be caused
2277 by any unrelated program you're running at the same time
2278 as Valgrind, if it has a large number of pending siginfo
2279 signals which it isn't taking delivery of.
2281 Since we depend on siginfo to work out why we were sent a
2282 signal and what we should do about it, we really can't
2283 continue unless we get it. */
2284 VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
2285 "I can't go on.\n", sigNo, signame(sigNo));
2287 " This may be because one of your programs has consumed your ration of\n"
2288 " siginfo structures. For more information, see:\n"
2289 " http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
2290 " Basically, some program on your system is building up a large queue of\n"
2291 " pending signals, and this causes the siginfo data for other signals to\n"
2292 " be dropped because it's exceeding a system limit. However, Valgrind\n"
2293 " absolutely needs siginfo for SIGSEGV. A workaround is to track down the\n"
2294 " offending program and avoid running it while using Valgrind, but there\n"
2295 " is no easy way to do this. Apparently the problem was fixed in kernel\n"
2298 /* It's a fatal signal, so we force the default handler. */
2299 VG_(set_default_handler)(sigNo);
2300 deliver_signal(tid, info, uc);
2301 resume_scheduler(tid);
2302 VG_(exit)(99); /* If we can't resume, then just exit */
2306 qtid = 0; /* shared pending by default */
2307 # if defined(VGO_linux)
2308 if (info->si_code == VKI_SI_TKILL)
2309 qtid = tid; /* directed to us specifically */
2311 queue_signal(qtid, info);
2315 /* Returns the reported fault address for an exact address */
2316 static Addr fault_mask(Addr in)
2318 /* We have to use VG_PGROUNDDN because faults on s390x only deliver
2319 the page address but not the address within a page.
2321 # if defined(VGA_s390x)
2322 return VG_PGROUNDDN(in);
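/* E.g. with 4KB pages, a fault at 0x8004a123 is reported by the
   s390x kernel as 0x8004a000 (illustrative values), so callers must
   compare page-rounded addresses. */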
2328 /* Returns True if the sync signal was due to the stack requiring extension
2329 and the extension was successful.
2331 static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
2335 NSegment const* seg;
2336 NSegment const* seg_next;
2338 if (info->si_signo != VKI_SIGSEGV)
2341 fault = (Addr)info->VKI_SIGINFO_si_addr;
2342 esp = VG_(get_SP)(tid);
2343 seg = VG_(am_find_nsegment)(fault);
2344 seg_next = seg ? VG_(am_next_nsegment)( (NSegment*)seg, True/*fwds*/ )
2347 if (VG_(clo_trace_signals)) {
2349 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2351 info->si_code, fault, tid, esp);
2353 VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
2355 info->si_code, fault, tid, esp, seg->start, seg->end);
2358 if (info->si_code == VKI_SEGV_MAPERR
2360 && seg->kind == SkResvn
2361 && seg->smode == SmUpper
2363 && seg_next->kind == SkAnonC
2364 && seg->end+1 == seg_next->start
2365 && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
2366 /* If the fault address is above esp but below the current known
2367 stack segment base, and it was a fault because there was
2368 nothing mapped there (as opposed to a permissions fault),
2369 then extend the stack segment.
2371 Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
2372 if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
2373 if (VG_(clo_trace_signals))
2374 VG_(dmsg)(" -> extended stack base to %#lx\n",
2375 VG_PGROUNDDN(fault));
2378 VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
2388 void sync_signalhandler_from_kernel ( ThreadId tid,
2389 Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
2391 /* Check to see if some part of Valgrind itself is interested in faults.
2392 The fault catcher should never be set whilst we're in generated code, so
2393 check for that. AFAIK the only use of the catcher right now is
2394 memcheck's leak detector. */
2395 if (fault_catcher) {
2396 vg_assert(VG_(in_generated_code) == False);
2398 (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
2399 /* If the catcher returns, then it didn't handle the fault,
2400 so carry on panicking. */
2403 if (extend_stack_if_appropriate(tid, info)) {
2404 /* Stack extension occurred, so we don't need to do anything else; upon
2405 returning from this function, we'll restart the host (hence guest)
2408 /* OK, this is a signal we really have to deal with. If it came
2409 from the client's code, then we can jump back into the scheduler
2410 and have it delivered. Otherwise it's a Valgrind bug. */
2411 ThreadState *tst = VG_(get_ThreadState)(tid);
2413 if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
2414 /* signal is blocked, but they're not allowed to block faults */
2415 VG_(set_default_handler)(sigNo);
2418 if (VG_(in_generated_code)) {
2419 if (VG_(gdbserver_report_signal) (sigNo, tid)
2420 || VG_(sigismember)(&tst->sig_mask, sigNo)) {
2421 /* Can't continue; must longjmp back to the scheduler and thus
2422 enter the sighandler immediately. */
2423 deliver_signal(tid, info, uc);
2424 resume_scheduler(tid);
2427 resume_scheduler(tid);
/* If resume_scheduler returns or it's our fault, it means we
2431 don't have longjmp set up, implying that we weren't running
2432 client code, and therefore it was actually generated by
2433 Valgrind internally.
2435 VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
2436 "a signal %d (%s) - exiting\n",
2437 sigNo, signame(sigNo));
2439 VG_(dmsg)("si_code=%x; Faulting address: %p; sp: %#lx\n",
2440 info->si_code, info->VKI_SIGINFO_si_addr,
2441 VG_UCONTEXT_STACK_PTR(uc));
2444 VG_(kill_self)(sigNo); /* generate a core dump */
2446 //if (tid == 0) /* could happen after everyone has exited */
2447 // tid = VG_(master_tid);
2448 vg_assert(tid != 0);
2450 UnwindStartRegs startRegs;
2451 VG_(memset)(&startRegs, 0, sizeof(startRegs));
2453 VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
2454 VG_(core_panic_at)("Killed by fatal signal", &startRegs);
2459 Receive a sync signal from the host.
2462 void sync_signalhandler ( Int sigNo,
2463 vki_siginfo_t *info, struct vki_ucontext *uc )
2465 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2469 VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);
2471 vg_assert(info != NULL);
2472 vg_assert(info->si_signo == sigNo);
2473 vg_assert(sigNo == VKI_SIGSEGV ||
2474 sigNo == VKI_SIGBUS ||
2475 sigNo == VKI_SIGFPE ||
2476 sigNo == VKI_SIGILL ||
2477 sigNo == VKI_SIGTRAP);
2479 info->si_code = sanitize_si_code(info->si_code);
2481 from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);
2483 if (VG_(clo_trace_signals)) {
2484 VG_(dmsg)("sync signal handler: "
2485 "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
2486 sigNo, info->si_code, VG_(get_IP)(tid),
2487 VG_UCONTEXT_INSTR_PTR(uc),
2488 ( from_user ? "user" : "kernel" ));
2490 vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
2494 VG_(printf)("info->si_signo %d\n", info->si_signo);
2495 VG_(printf)("info->si_errno %d\n", info->si_errno);
2496 VG_(printf)("info->si_code %d\n", info->si_code);
2497 VG_(printf)("info->si_pid %d\n", info->si_pid);
2498 VG_(printf)("info->si_uid %d\n", info->si_uid);
2499 VG_(printf)("info->si_status %d\n", info->si_status);
2500 VG_(printf)("info->si_addr %p\n", info->si_addr);
2504 /* Figure out if the signal is being sent from outside the process.
2505 (Why do we care?) If the signal is from the user rather than the
2506 kernel, then treat it more like an async signal than a sync signal --
2507 that is, merely queue it for later delivery. */
2509 sync_signalhandler_from_user( tid, sigNo, info, uc);
2511 sync_signalhandler_from_kernel(tid, sigNo, info, uc);
2517 Kill this thread. Makes it leave any syscall it might be currently
2518 blocked in, and return to the scheduler. This doesn't mark the thread
2519 as exiting; that's the caller's job.
2521 static void sigvgkill_handler(int signo, vki_siginfo_t *si,
2522 struct vki_ucontext *uc)
2524 ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
2525 ThreadStatus at_signal = VG_(threads)[tid].status;
2527 if (VG_(clo_trace_signals))
2528 VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);
2530 VG_(acquire_BigLock)(tid, "sigvgkill_handler");
2532 vg_assert(signo == VG_SIGVGKILL);
2533 vg_assert(si->si_signo == signo);
2535 /* jrs 2006 August 3: the following assertion seems incorrect to
2536 me, and fails on AIX. sigvgkill could be sent to a thread which
2537 is runnable - see VG_(nuke_all_threads_except) in the scheduler.
2538 Hence comment these out ..
2540 vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
2541 VG_(post_syscall)(tid);
2545 if (at_signal == VgTs_WaitSys)
2546 VG_(post_syscall)(tid);
2547 /* jrs 2006 August 3 ends */
2549 resume_scheduler(tid);
2551 VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
static __attribute__((unused))
2555 void pp_ksigaction ( vki_sigaction_toK_t* sa )
2558 VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
2561 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2562 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2568 VG_(printf)("pp_ksigaction: { ");
2569 for (i = 1; i <= VG_(max_signal); i++)
2570 if (VG_(sigismember(&(sa->sa_mask),i)))
2571 VG_(printf)("%d ", i);
2576 Force signal handler to default
2578 void VG_(set_default_handler)(Int signo)
2580 vki_sigaction_toK_t sa;
2582 sa.ksa_handler = VKI_SIG_DFL;
2584 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2585 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2588 VG_(sigemptyset)(&sa.sa_mask);
2590 VG_(do_sys_sigaction)(signo, &sa, NULL);
2594 Poll for pending signals, and set the next one up for delivery.
2596 void VG_(poll_signals)(ThreadId tid)
2598 vki_siginfo_t si, *sip;
2599 vki_sigset_t pollset;
2600 ThreadState *tst = VG_(get_ThreadState)(tid);
2601 vki_sigset_t saved_mask;
2603 /* look for all the signals this thread isn't blocking */
2604 /* pollset = ~tst->sig_mask */
2605 VG_(sigcomplementset)( &pollset, &tst->sig_mask );
2607 block_all_host_signals(&saved_mask); // protect signal queue
2609 /* First look for any queued pending signals */
2610 sip = next_queued(tid, &pollset); /* this thread */
2613 sip = next_queued(0, &pollset); /* process-wide */
2615 /* If there was nothing queued, ask the kernel for a pending signal */
2616 if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
2617 if (VG_(clo_trace_signals))
2618 VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
2624 /* OK, something to do; deliver it */
2625 if (VG_(clo_trace_signals))
2626 VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
2627 if (!is_sig_ign(sip->si_signo, tid))
2628 deliver_signal(tid, sip, NULL);
2629 else if (VG_(clo_trace_signals))
2630 VG_(dmsg)(" signal %d ignored\n", sip->si_signo);
2632 sip->si_signo = 0; /* remove from signal queue, if that's
2633 where it came from */
2636 restore_all_host_signals(&saved_mask);
2639 /* At startup, copy the process' real signal state to the SCSS.
2640 Whilst doing this, block all real signals. Then calculate SKSS and
2641 set the kernel to that. Also initialise DCSS.
2643 void VG_(sigstartup_actions) ( void )
2645 #if defined(VGO_l4re)
2646 VG_(unimplemented)((char *)__func__);
2648 Int i, ret, vKI_SIGRTMIN;
2649 vki_sigset_t saved_procmask;
2650 vki_sigaction_fromK_t sa;
2652 VG_(memset)(&scss, 0, sizeof(scss));
2653 VG_(memset)(&skss, 0, sizeof(skss));
2655 # if defined(VKI_SIGRTMIN)
2656 vKI_SIGRTMIN = VKI_SIGRTMIN;
2658 vKI_SIGRTMIN = 0; /* eg Darwin */
2661 /* VG_(printf)("SIGSTARTUP\n"); */
2662 /* Block all signals. saved_procmask remembers the previous mask,
2663 which the first thread inherits.
2665 block_all_host_signals( &saved_procmask );
2667 /* Copy per-signal settings to SCSS. */
2668 for (i = 1; i <= _VKI_NSIG; i++) {
2669 /* Get the old host action */
2670 ret = VG_(sigaction)(i, NULL, &sa);
2672 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2673 /* apparently we may not even ask about the disposition of these
2674 signals, let alone change them */
2675 if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
2682 /* Try setting it back to see if this signal is really
2684 if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
2685 && i >= vKI_SIGRTMIN) {
2686 vki_sigaction_toK_t tsa, sa2;
2688 tsa.ksa_handler = (void *)sync_signalhandler;
2689 tsa.sa_flags = VKI_SA_SIGINFO;
2690 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2691 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2692 tsa.sa_restorer = 0;
2694 VG_(sigfillset)(&tsa.sa_mask);
2696 /* try setting it to some arbitrary handler */
2697 if (VG_(sigaction)(i, &tsa, NULL) != 0) {
2698 /* failed - not really usable */
2702 VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
2703 ret = VG_(sigaction)(i, &sa2, NULL);
2704 vg_assert(ret == 0);
2707 VG_(max_signal) = i;
2709 if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
2710 VG_(printf)("snaffling handler 0x%lx for signal %d\n",
2711 (Addr)(sa.ksa_handler), i );
2713 scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
2714 scss.scss_per_sig[i].scss_flags = sa.sa_flags;
2715 scss.scss_per_sig[i].scss_mask = sa.sa_mask;
2717 scss.scss_per_sig[i].scss_restorer = NULL;
2718 # if !defined(VGP_ppc32_aix5) && !defined(VGP_ppc64_aix5) && \
2719 !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
2720 scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
2723 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2724 # if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
2725 scss.scss_per_sig[i].scss_sa_tramp = NULL;
2727 /* We can't know what it was, because Darwin's sys_sigaction
2732 if (VG_(clo_trace_signals))
2733 VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));
2735 /* Our private internal signals are treated as ignored */
2736 scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
2737 scss.scss_per_sig[VG_SIGVGKILL].scss_flags = VKI_SA_SIGINFO;
2738 VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);
2740 /* Copy the process' signal mask into the root thread. */
2741 vg_assert(VG_(threads)[1].status == VgTs_Init);
2742 for (i = 2; i < VG_N_THREADS; i++)
2743 vg_assert(VG_(threads)[i].status == VgTs_Empty);
2745 VG_(threads)[1].sig_mask = saved_procmask;
2746 VG_(threads)[1].tmp_sig_mask = saved_procmask;
2748 /* Calculate SKSS and apply it. This also sets the initial kernel
2749 mask we need to run with. */
2750 handle_SCSS_change( True /* forced update */ );
2752 /* Leave with all signals still blocked; the thread scheduler loop
2753 will set the appropriate mask at the appropriate time. */
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/