2 /*--------------------------------------------------------------------*/
3 /*--- Handle system calls. syswrap-main.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2000-2010 Julian Seward
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 The GNU General Public License is contained in the file COPYING.
31 #include "libvex_guest_offsets.h"
32 #include "libvex_trc_values.h"
33 #include "pub_core_basics.h"
34 #include "pub_core_aspacemgr.h"
35 #include "pub_core_vki.h"
36 #include "pub_core_vkiscnums.h"
37 #include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
38 #include "pub_core_threadstate.h"
39 #include "pub_core_libcbase.h"
40 #include "pub_core_libcassert.h"
41 #include "pub_core_libcprint.h"
42 #include "pub_core_libcproc.h" // For VG_(getpid)()
43 #include "pub_core_libcsignal.h"
44 #include "pub_core_scheduler.h" // For VG_({acquire,release}_BigLock),
46 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
47 #include "pub_core_tooliface.h"
48 #include "pub_core_options.h"
49 #include "pub_core_signals.h" // For VG_SIGVGKILL, VG_(poll_signals)
50 #include "pub_core_syscall.h"
51 #include "pub_core_machine.h"
52 #include "pub_core_syswrap.h"
54 #include "priv_types_n_macros.h"
55 #include "priv_syswrap-main.h"
57 #if defined(VGO_darwin)
58 #include "priv_syswrap-darwin.h"
61 /* Useful info which needs to be recorded somewhere:
62 Use of registers in syscalls is:
64 NUM ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 ARG8 RESULT
66 x86 eax ebx ecx edx esi edi ebp n/a n/a eax (== NUM)
67 amd64 rax rdi rsi rdx r10 r8 r9 n/a n/a rax (== NUM)
68 ppc32 r0 r3 r4 r5 r6 r7 r8 n/a n/a r3+CR0.SO (== ARG1)
69 ppc64 r0 r3 r4 r5 r6 r7 r8 n/a n/a r3+CR0.SO (== ARG1)
70 arm r7 r0 r1 r2 r3 r4 r5 n/a n/a r0 (== ARG1)
71 On s390x the svc instruction is used for system calls. The system call
72 number is encoded in the instruction (8 bit immediate field). Since Linux
73 2.6 it is also allowed to use svc 0 with the system call number in r1.
74 This was introduced for system calls >255, but works for all. It is
75 also possible to see the svc 0 together with an EXecute instruction, that
76 fills in the immediate field.
77 s390x r1/SVC r2 r3 r4 r5 r6 r7 n/a n/a r2 (== ARG1)
79 ppc32 r2 r3 r4 r5 r6 r7 r8 r9 r10 r3(res),r4(err)
80 ppc64 r2 r3 r4 r5 r6 r7 r8 r9 r10 r3(res),r4(err)
83 x86 eax +4 +8 +12 +16 +20 +24 +28 +32 edx:eax, eflags.c
84 amd64 rax rdi rsi rdx rcx r8 r9 +8 +16 rdx:rax, rflags.c
87 x86 sys eax ebx ecx edx esi edi ebp n/a eax
89 For x86-darwin, "+N" denotes "in memory at N(%esp)"; ditto
90 amd64-darwin. Apparently 0(%esp) is some kind of return address
91 (perhaps for syscalls done with "sysenter"?) I don't think it is
92 relevant for syscalls done with "int $0x80/1/2".
95 /* This is the top level of the system-call handler module. All
96 system calls are channelled through here, doing two things:
98 * notify the tool of the events (mem/reg reads, writes) happening
100 * perform the syscall, usually by passing it along to the kernel
103 A magical piece of assembly code, do_syscall_for_client_WRK, in
104 syscall-$PLATFORM.S does the tricky bit of passing a syscall to the
105 kernel, whilst having the simulator retain control.
108 /* The main function is VG_(client_syscall). The simulation calls it
109 whenever a client thread wants to do a syscall. The following is a
110 sketch of what it does.
112 * Ensures the root thread's stack is suitably mapped. Tedious and
113 arcane. See big big comment in VG_(client_syscall).
115 * First, it rounds up the syscall number and args (which is a
116 platform dependent activity) and puts them in a struct ("args")
117 and also a copy in "orig_args".
119 The pre/post wrappers refer to these structs and so no longer
120 need magic macros to access any specific registers. This struct
121 is stored in thread-specific storage.
124 * The pre-wrapper is called, passing it a pointer to struct
128 * The pre-wrapper examines the args and pokes the tool
129 appropriately. It may modify the args; this is why "orig_args"
132 The pre-wrapper may choose to 'do' the syscall itself, and
133 concludes one of three outcomes:
135 Success(N) -- syscall is already complete, with success;
138 Fail(N) -- syscall is already complete, with failure;
141 HandToKernel -- (the usual case): this needs to be given to
142 the kernel to be done, using the values in
143 the possibly-modified "args" struct.
145 In addition, the pre-wrapper may set some flags:
147 MayBlock -- only applicable when outcome==HandToKernel
149 PostOnFail -- only applicable when outcome==HandToKernel or Fail
152 * If the pre-outcome is HandToKernel, the syscall is duly handed
153 off to the kernel (perhaps involving some thread switchery, but
154 that's not important). This reduces the possible set of outcomes
155 to either Success(N) or Fail(N).
158 * The outcome (Success(N) or Fail(N)) is written back to the guest
159 register(s). This is platform specific:
161 x86: Success(N) ==> eax = N
166 ppc32: Success(N) ==> r3 = N, CR0.SO = 0
167 Fail(N) ==> r3 = N, CR0.SO = 1
170 x86: Success(N) ==> edx:eax = N, cc = 0
171 Fail(N) ==> edx:eax = N, cc = 1
173 s390x: Success(N) ==> r2 = N
176 * The post wrapper is called if:
179 - outcome==Success or (outcome==Fail and PostOnFail is set)
181 The post wrapper is passed the adulterated syscall args (struct
182 "args"), and the syscall outcome (viz, Success(N) or Fail(N)).
184 There are several other complications, primarily to do with
185 syscalls getting interrupted, explained in comments in the code.
188 /* CAVEATS for writing wrappers. It is important to follow these!
190 The macros defined in priv_types_n_macros.h are designed to help
191 decouple the wrapper logic from the actual representation of
192 syscall args/results, since these wrappers are designed to work on
195 Sometimes a PRE wrapper will complete the syscall itself, without
196 handing it to the kernel. It will use one of SET_STATUS_Success,
197 SET_STATUS_Failure or SET_STATUS_from_SysRes to set the return
198 value. It is critical to appreciate that use of the macro does not
199 immediately cause the underlying guest state to be updated -- that
200 is done by the driver logic in this file, when the wrapper returns.
202 As a result, PRE wrappers of the following form will malfunction:
207 SET_STATUS_Somehow(...)
209 // do something that assumes guest state is up to date
212 In particular, direct or indirect calls to VG_(poll_signals) after
213 setting STATUS can cause the guest state to be read (in order to
214 build signal frames). Do not do this. If you want a signal poll
215 after the syscall goes through, do "*flags |= SfPollAfter" and the
216 driver logic will do it for you.
220 Another critical requirement following introduction of new address
221 space manager (JRS, 20050923):
223 In a situation where the mappedness of memory has changed, aspacem
224 should be notified BEFORE the tool. Hence the following is
227 Bool d = VG_(am_notify_munmap)(s->start, s->end+1 - s->start);
228 VG_TRACK( die_mem_munmap, s->start, s->end+1 - s->start );
230 VG_(discard_translations)(s->start, s->end+1 - s->start);
232 whilst this is wrong:
234 VG_TRACK( die_mem_munmap, s->start, s->end+1 - s->start );
235 Bool d = VG_(am_notify_munmap)(s->start, s->end+1 - s->start);
237 VG_(discard_translations)(s->start, s->end+1 - s->start);
239 The reason is that the tool may itself ask aspacem for more shadow
240 memory as a result of the VG_TRACK call. In such a situation it is
241 critical that aspacem's segment array is up to date -- hence the
242 need to notify aspacem first.
246 Also .. take care to call VG_(discard_translations) whenever
247 memory with execute permissions is unmapped.
251 /* ---------------------------------------------------------------------
252 Do potentially blocking syscall for the client, and mess with
253 signal masks at the same time.
254 ------------------------------------------------------------------ */
256 /* Perform a syscall on behalf of a client thread, using a specific
257 signal mask. On completion, the signal mask is set to restore_mask
258 (which presumably blocks almost everything). If a signal happens
259 during the syscall, the handler should call
260 VG_(fixup_guest_state_after_syscall_interrupted) to adjust the
261 thread's context to do the right thing.
263 The _WRK function is handwritten assembly, implemented per-platform
264 in coregrind/m_syswrap/syscall-$PLAT.S. It has some very magic
265 properties. See comments at the top of
266 VG_(fixup_guest_state_after_syscall_interrupted) below for details.
268 This function (these functions) are required to return zero in case
269 of success (even if the syscall itself failed), and nonzero if the
270 sigprocmask-swizzling calls failed. We don't actually care about
271 the failure values from sigprocmask, although most of the assembly
272 implementations do attempt to return that, using the convention
273 0 for success, or 0x8000 | error-code for failure.
275 #if defined(VGO_linux)
277 UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
279 const vki_sigset_t *syscall_mask,
280 const vki_sigset_t *restore_mask,
282 #elif defined(VGO_aix5)
284 UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
286 const vki_sigset_t *syscall_mask,
287 const vki_sigset_t *restore_mask,
288 Word sigsetSzB, /* unused */
289 Word __nr_sigprocmask );
290 #elif defined(VGO_darwin)
292 UWord ML_(do_syscall_for_client_unix_WRK)( Word syscallno,
294 const vki_sigset_t *syscall_mask,
295 const vki_sigset_t *restore_mask,
296 Word sigsetSzB ); /* unused */
298 UWord ML_(do_syscall_for_client_mach_WRK)( Word syscallno,
300 const vki_sigset_t *syscall_mask,
301 const vki_sigset_t *restore_mask,
302 Word sigsetSzB ); /* unused */
304 UWord ML_(do_syscall_for_client_mdep_WRK)( Word syscallno,
306 const vki_sigset_t *syscall_mask,
307 const vki_sigset_t *restore_mask,
308 Word sigsetSzB ); /* unused */
309 #elif defined(VGO_l4re)
311 UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
313 const vki_sigset_t *syscall_mask,
314 const vki_sigset_t *restore_mask,
316 UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
318 const vki_sigset_t *syscall_mask,
319 const vki_sigset_t *restore_mask,
322 VG_(unimplemented)((char*)__func__);
331 void do_syscall_for_client ( Int syscallno,
333 const vki_sigset_t* syscall_mask )
335 #if !defined(VGO_l4re)
338 # if defined(VGO_linux)
339 err = ML_(do_syscall_for_client_WRK)(
340 syscallno, &tst->arch.vex,
341 syscall_mask, &saved, sizeof(vki_sigset_t)
343 # elif defined(VGO_aix5)
344 err = ML_(do_syscall_for_client_WRK)(
345 syscallno, &tst->arch.vex,
346 syscall_mask, &saved, 0/*unused:sigsetSzB*/,
349 # elif defined(VGO_darwin)
350 switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
351 case VG_DARWIN_SYSCALL_CLASS_UNIX:
352 err = ML_(do_syscall_for_client_unix_WRK)(
353 VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
354 syscall_mask, &saved, 0/*unused:sigsetSzB*/
357 case VG_DARWIN_SYSCALL_CLASS_MACH:
358 err = ML_(do_syscall_for_client_mach_WRK)(
359 VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
360 syscall_mask, &saved, 0/*unused:sigsetSzB*/
363 case VG_DARWIN_SYSCALL_CLASS_MDEP:
364 err = ML_(do_syscall_for_client_mdep_WRK)(
365 VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
366 syscall_mask, &saved, 0/*unused:sigsetSzB*/
379 "ML_(do_syscall_for_client_WRK): sigprocmask error %d",
383 VG_(unimplemented)("unimplemented function do_syscall_for_client()");
388 /* ---------------------------------------------------------------------
389 Impedance matchers and misc helpers
390 ------------------------------------------------------------------ */
393 Bool eq_SyscallArgs ( SyscallArgs* a1, SyscallArgs* a2 )
395 return a1->sysno == a2->sysno
396 && a1->arg1 == a2->arg1
397 && a1->arg2 == a2->arg2
398 && a1->arg3 == a2->arg3
399 && a1->arg4 == a2->arg4
400 && a1->arg5 == a2->arg5
401 && a1->arg6 == a2->arg6
402 && a1->arg7 == a2->arg7
403 && a1->arg8 == a2->arg8;
407 Bool eq_SyscallStatus ( SyscallStatus* s1, SyscallStatus* s2 )
409 /* was: return s1->what == s2->what && sr_EQ( s1->sres, s2->sres ); */
410 if (s1->what == s2->what && sr_EQ( s1->sres, s2->sres ))
412 # if defined(VGO_darwin)
413 /* Darwin-specific debugging guff */
414 vg_assert(s1->what == s2->what);
415 VG_(printf)("eq_SyscallStatus:\n");
416 VG_(printf)(" {%lu %lu %u}\n", s1->sres._wLO, s1->sres._wHI, s1->sres._mode);
417 VG_(printf)(" {%lu %lu %u}\n", s2->sres._wLO, s2->sres._wHI, s2->sres._mode);
423 /* Convert between SysRes and SyscallStatus, to the extent possible. */
426 SyscallStatus convert_SysRes_to_SyscallStatus ( SysRes res )
428 SyscallStatus status;
429 status.what = SsComplete;
435 /* Impedance matchers. These convert syscall arg or result data from
436 the platform-specific in-guest-state format to the canonical
437 formats, and back. */
440 void getSyscallArgsFromGuestState ( /*OUT*/SyscallArgs* canonical,
441 /*IN*/ VexGuestArchState* gst_vanilla,
444 #if defined(VGP_x86_linux)
445 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
446 canonical->sysno = gst->guest_EAX;
447 canonical->arg1 = gst->guest_EBX;
448 canonical->arg2 = gst->guest_ECX;
449 canonical->arg3 = gst->guest_EDX;
450 canonical->arg4 = gst->guest_ESI;
451 canonical->arg5 = gst->guest_EDI;
452 canonical->arg6 = gst->guest_EBP;
456 #elif defined(VGP_amd64_linux)
457 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
458 canonical->sysno = gst->guest_RAX;
459 canonical->arg1 = gst->guest_RDI;
460 canonical->arg2 = gst->guest_RSI;
461 canonical->arg3 = gst->guest_RDX;
462 canonical->arg4 = gst->guest_R10;
463 canonical->arg5 = gst->guest_R8;
464 canonical->arg6 = gst->guest_R9;
468 #elif defined(VGP_ppc32_linux)
469 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
470 canonical->sysno = gst->guest_GPR0;
471 canonical->arg1 = gst->guest_GPR3;
472 canonical->arg2 = gst->guest_GPR4;
473 canonical->arg3 = gst->guest_GPR5;
474 canonical->arg4 = gst->guest_GPR6;
475 canonical->arg5 = gst->guest_GPR7;
476 canonical->arg6 = gst->guest_GPR8;
480 #elif defined(VGP_ppc64_linux)
481 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
482 canonical->sysno = gst->guest_GPR0;
483 canonical->arg1 = gst->guest_GPR3;
484 canonical->arg2 = gst->guest_GPR4;
485 canonical->arg3 = gst->guest_GPR5;
486 canonical->arg4 = gst->guest_GPR6;
487 canonical->arg5 = gst->guest_GPR7;
488 canonical->arg6 = gst->guest_GPR8;
492 #elif defined(VGP_arm_linux)
493 VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
494 canonical->sysno = gst->guest_R7;
495 canonical->arg1 = gst->guest_R0;
496 canonical->arg2 = gst->guest_R1;
497 canonical->arg3 = gst->guest_R2;
498 canonical->arg4 = gst->guest_R3;
499 canonical->arg5 = gst->guest_R4;
500 canonical->arg6 = gst->guest_R5;
504 #elif defined(VGP_ppc32_aix5)
505 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
506 canonical->sysno = gst->guest_GPR2;
507 canonical->arg1 = gst->guest_GPR3;
508 canonical->arg2 = gst->guest_GPR4;
509 canonical->arg3 = gst->guest_GPR5;
510 canonical->arg4 = gst->guest_GPR6;
511 canonical->arg5 = gst->guest_GPR7;
512 canonical->arg6 = gst->guest_GPR8;
513 canonical->arg7 = gst->guest_GPR9;
514 canonical->arg8 = gst->guest_GPR10;
516 #elif defined(VGP_ppc64_aix5)
517 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
518 canonical->sysno = gst->guest_GPR2;
519 canonical->arg1 = gst->guest_GPR3;
520 canonical->arg2 = gst->guest_GPR4;
521 canonical->arg3 = gst->guest_GPR5;
522 canonical->arg4 = gst->guest_GPR6;
523 canonical->arg5 = gst->guest_GPR7;
524 canonical->arg6 = gst->guest_GPR8;
525 canonical->arg7 = gst->guest_GPR9;
526 canonical->arg8 = gst->guest_GPR10;
528 #elif defined(VGP_x86_darwin)
529 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
530 UWord *stack = (UWord *)gst->guest_ESP;
531 // GrP fixme hope syscalls aren't called with really shallow stacks...
532 canonical->sysno = gst->guest_EAX;
533 if (canonical->sysno != 0) {
534 // stack[0] is return address
535 canonical->arg1 = stack[1];
536 canonical->arg2 = stack[2];
537 canonical->arg3 = stack[3];
538 canonical->arg4 = stack[4];
539 canonical->arg5 = stack[5];
540 canonical->arg6 = stack[6];
541 canonical->arg7 = stack[7];
542 canonical->arg8 = stack[8];
544 // GrP fixme hack handle syscall()
545 // GrP fixme what about __syscall() ?
546 // stack[0] is return address
547 // DDD: the tool can't see that the params have been shifted! Can
548 // lead to incorrect checking, I think, because the PRRAn/PSARn
549 // macros will mention the pre-shifted args.
550 canonical->sysno = stack[1];
551 vg_assert(canonical->sysno != 0);
552 canonical->arg1 = stack[2];
553 canonical->arg2 = stack[3];
554 canonical->arg3 = stack[4];
555 canonical->arg4 = stack[5];
556 canonical->arg5 = stack[6];
557 canonical->arg6 = stack[7];
558 canonical->arg7 = stack[8];
559 canonical->arg8 = stack[9];
561 PRINT("SYSCALL[%d,?](%s) syscall(%s, ...); please stand by...\n",
562 VG_(getpid)(), /*tid,*/
563 VG_SYSNUM_STRING(0), VG_SYSNUM_STRING(canonical->sysno));
566 // Here we determine what kind of syscall it was by looking at the
567 // interrupt kind, and then encode the syscall number using the 64-bit
568 // encoding for Valgrind's internal use.
570 // DDD: Would it be better to stash the JMP kind into the Darwin
571 // thread state rather than passing in the trc?
573 case VEX_TRC_JMP_SYS_INT128:
574 // int $0x80 = Unix, 64-bit result
575 vg_assert(canonical->sysno >= 0);
576 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(canonical->sysno);
578 case VEX_TRC_JMP_SYS_SYSENTER:
579 // syscall = Unix, 32-bit result
580 // OR Mach, 32-bit result
581 if (canonical->sysno >= 0) {
582 // GrP fixme hack: 0xffff == I386_SYSCALL_NUMBER_MASK
583 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(canonical->sysno
586 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MACH(-canonical->sysno);
589 case VEX_TRC_JMP_SYS_INT129:
590 // int $0x81 = Mach, 32-bit result
591 vg_assert(canonical->sysno < 0);
592 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MACH(-canonical->sysno);
594 case VEX_TRC_JMP_SYS_INT130:
595 // int $0x82 = mdep, 32-bit result
596 vg_assert(canonical->sysno >= 0);
597 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MDEP(canonical->sysno);
604 #elif defined(VGP_amd64_darwin)
605 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
606 UWord *stack = (UWord *)gst->guest_RSP;
608 vg_assert(trc == VEX_TRC_JMP_SYS_SYSCALL);
610 // GrP fixme hope syscalls aren't called with really shallow stacks...
611 canonical->sysno = gst->guest_RAX;
612 if (canonical->sysno != __NR_syscall) {
613 // stack[0] is return address
614 canonical->arg1 = gst->guest_RDI;
615 canonical->arg2 = gst->guest_RSI;
616 canonical->arg3 = gst->guest_RDX;
617 canonical->arg4 = gst->guest_R10; // not rcx with syscall insn
618 canonical->arg5 = gst->guest_R8;
619 canonical->arg6 = gst->guest_R9;
620 canonical->arg7 = stack[1];
621 canonical->arg8 = stack[2];
623 // GrP fixme hack handle syscall()
624 // GrP fixme what about __syscall() ?
625 // stack[0] is return address
626 // DDD: the tool can't see that the params have been shifted! Can
627 // lead to incorrect checking, I think, because the PRRAn/PSARn
628 // macros will mention the pre-shifted args.
629 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(gst->guest_RDI);
630 vg_assert(canonical->sysno != __NR_syscall);
631 canonical->arg1 = gst->guest_RSI;
632 canonical->arg2 = gst->guest_RDX;
633 canonical->arg3 = gst->guest_R10; // not rcx with syscall insn
634 canonical->arg4 = gst->guest_R8;
635 canonical->arg5 = gst->guest_R9;
636 canonical->arg6 = stack[1];
637 canonical->arg7 = stack[2];
638 canonical->arg8 = stack[3];
640 PRINT("SYSCALL[%d,?](%s) syscall(%s, ...); please stand by...\n",
641 VG_(getpid)(), /*tid,*/
642 VG_SYSNUM_STRING(0), VG_SYSNUM_STRING(canonical->sysno));
645 // no canonical->sysno adjustment needed
647 #elif defined(VGP_x86_l4re)
648 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
650 case VEX_TRC_JMP_SYS_SYSENTER: // fallthrough
651 case VEX_TRC_JMP_SYS_INT48:
652 canonical->sysno = SYS_INVOKE;
654 case VEX_TRC_JMP_SYS_INT50:
655 canonical->sysno = SYS_DEBUG;
657 case VEX_TRC_JMP_SIGTRAP:
658 canonical->sysno = SYS_ENTER_KDEBUG;
660 case VEX_TRC_JMP_SYS_INT128:
661 canonical->sysno = SYS_LINUX_INT80;
663 case VEX_TRC_JMP_L4_UD2:
664 canonical->sysno = SYS_UD2;
666 case VEX_TRC_JMP_L4_ARTIFICIAL:
667 canonical->sysno = SYS_ARTIFICIAL;
670 VG_(printf)("Unhandled TRC: %d ??\n", trc);
671 enter_kdebug("invalid syscall");
673 canonical->arg1 = gst->guest_EAX;
674 canonical->arg2 = gst->guest_EBX;
675 canonical->arg3 = gst->guest_ECX;
676 canonical->arg4 = gst->guest_EDX;
677 canonical->arg5 = gst->guest_ESI;
678 canonical->arg6 = gst->guest_EDI;
679 canonical->arg7 = gst->guest_EBP;
682 #elif defined(VGP_s390x_linux)
683 VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
684 canonical->sysno = gst->guest_SYSNO;
685 canonical->arg1 = gst->guest_r2;
686 canonical->arg2 = gst->guest_r3;
687 canonical->arg3 = gst->guest_r4;
688 canonical->arg4 = gst->guest_r5;
689 canonical->arg5 = gst->guest_r6;
690 canonical->arg6 = gst->guest_r7;
694 # error "getSyscallArgsFromGuestState: unknown arch"
699 void putSyscallArgsIntoGuestState ( /*IN*/ SyscallArgs* canonical,
700 /*OUT*/VexGuestArchState* gst_vanilla )
702 #if defined(VGP_x86_linux)
703 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
704 gst->guest_EAX = canonical->sysno;
705 gst->guest_EBX = canonical->arg1;
706 gst->guest_ECX = canonical->arg2;
707 gst->guest_EDX = canonical->arg3;
708 gst->guest_ESI = canonical->arg4;
709 gst->guest_EDI = canonical->arg5;
710 gst->guest_EBP = canonical->arg6;
712 #elif defined(VGP_amd64_linux)
713 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
714 gst->guest_RAX = canonical->sysno;
715 gst->guest_RDI = canonical->arg1;
716 gst->guest_RSI = canonical->arg2;
717 gst->guest_RDX = canonical->arg3;
718 gst->guest_R10 = canonical->arg4;
719 gst->guest_R8 = canonical->arg5;
720 gst->guest_R9 = canonical->arg6;
722 #elif defined(VGP_ppc32_linux)
723 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
724 gst->guest_GPR0 = canonical->sysno;
725 gst->guest_GPR3 = canonical->arg1;
726 gst->guest_GPR4 = canonical->arg2;
727 gst->guest_GPR5 = canonical->arg3;
728 gst->guest_GPR6 = canonical->arg4;
729 gst->guest_GPR7 = canonical->arg5;
730 gst->guest_GPR8 = canonical->arg6;
732 #elif defined(VGP_ppc64_linux)
733 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
734 gst->guest_GPR0 = canonical->sysno;
735 gst->guest_GPR3 = canonical->arg1;
736 gst->guest_GPR4 = canonical->arg2;
737 gst->guest_GPR5 = canonical->arg3;
738 gst->guest_GPR6 = canonical->arg4;
739 gst->guest_GPR7 = canonical->arg5;
740 gst->guest_GPR8 = canonical->arg6;
742 #elif defined(VGP_arm_linux)
743 VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
744 gst->guest_R7 = canonical->sysno;
745 gst->guest_R0 = canonical->arg1;
746 gst->guest_R1 = canonical->arg2;
747 gst->guest_R2 = canonical->arg3;
748 gst->guest_R3 = canonical->arg4;
749 gst->guest_R4 = canonical->arg5;
750 gst->guest_R5 = canonical->arg6;
752 #elif defined(VGP_ppc32_aix5)
753 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
754 gst->guest_GPR2 = canonical->sysno;
755 gst->guest_GPR3 = canonical->arg1;
756 gst->guest_GPR4 = canonical->arg2;
757 gst->guest_GPR5 = canonical->arg3;
758 gst->guest_GPR6 = canonical->arg4;
759 gst->guest_GPR7 = canonical->arg5;
760 gst->guest_GPR8 = canonical->arg6;
761 gst->guest_GPR9 = canonical->arg7;
762 gst->guest_GPR10 = canonical->arg8;
764 #elif defined(VGP_ppc64_aix5)
765 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
766 gst->guest_GPR2 = canonical->sysno;
767 gst->guest_GPR3 = canonical->arg1;
768 gst->guest_GPR4 = canonical->arg2;
769 gst->guest_GPR5 = canonical->arg3;
770 gst->guest_GPR6 = canonical->arg4;
771 gst->guest_GPR7 = canonical->arg5;
772 gst->guest_GPR8 = canonical->arg6;
773 gst->guest_GPR9 = canonical->arg7;
774 gst->guest_GPR10 = canonical->arg8;
776 #elif defined(VGP_x86_darwin)
777 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
778 UWord *stack = (UWord *)gst->guest_ESP;
780 gst->guest_EAX = VG_DARWIN_SYSNO_FOR_KERNEL(canonical->sysno);
782 // GrP fixme? gst->guest_TEMP_EFLAG_C = 0;
783 // stack[0] is return address
784 stack[1] = canonical->arg1;
785 stack[2] = canonical->arg2;
786 stack[3] = canonical->arg3;
787 stack[4] = canonical->arg4;
788 stack[5] = canonical->arg5;
789 stack[6] = canonical->arg6;
790 stack[7] = canonical->arg7;
791 stack[8] = canonical->arg8;
793 #elif defined(VGP_amd64_darwin)
794 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
795 UWord *stack = (UWord *)gst->guest_RSP;
797 gst->guest_RAX = VG_DARWIN_SYSNO_FOR_KERNEL(canonical->sysno);
798 // GrP fixme? gst->guest_TEMP_EFLAG_C = 0;
800 // stack[0] is return address
801 gst->guest_RDI = canonical->arg1;
802 gst->guest_RSI = canonical->arg2;
803 gst->guest_RDX = canonical->arg3;
804 gst->guest_RCX = canonical->arg4;
805 gst->guest_R8 = canonical->arg5;
806 gst->guest_R9 = canonical->arg6;
807 stack[1] = canonical->arg7;
808 stack[2] = canonical->arg8;
810 #elif defined(VGP_x86_l4re)
811 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
812 gst->guest_EAX = canonical->arg1;
813 gst->guest_EBX = canonical->arg2;
814 gst->guest_ECX = canonical->arg3;
815 gst->guest_EDX = canonical->arg4;
816 gst->guest_ESI = canonical->arg5;
817 gst->guest_EDI = canonical->arg6;
818 gst->guest_EBP = canonical->arg7;
820 #elif defined(VGP_s390x_linux)
821 VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
822 gst->guest_SYSNO = canonical->sysno;
823 gst->guest_r2 = canonical->arg1;
824 gst->guest_r3 = canonical->arg2;
825 gst->guest_r4 = canonical->arg3;
826 gst->guest_r5 = canonical->arg4;
827 gst->guest_r6 = canonical->arg5;
828 gst->guest_r7 = canonical->arg6;
831 # error "putSyscallArgsIntoGuestState: unknown arch"
836 void getSyscallStatusFromGuestState ( /*OUT*/SyscallStatus* canonical,
837 /*IN*/ VexGuestArchState* gst_vanilla )
839 # if defined(VGP_x86_linux)
840 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
841 canonical->sres = VG_(mk_SysRes_x86_linux)( gst->guest_EAX );
842 canonical->what = SsComplete;
844 # elif defined(VGP_amd64_linux)
845 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
846 canonical->sres = VG_(mk_SysRes_amd64_linux)( gst->guest_RAX );
847 canonical->what = SsComplete;
849 # elif defined(VGP_ppc32_linux)
850 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
851 UInt cr = LibVEX_GuestPPC32_get_CR( gst );
852 UInt cr0so = (cr >> 28) & 1;
853 canonical->sres = VG_(mk_SysRes_ppc32_linux)( gst->guest_GPR3, cr0so );
854 canonical->what = SsComplete;
856 # elif defined(VGP_ppc64_linux)
857 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
858 UInt cr = LibVEX_GuestPPC64_get_CR( gst );
859 UInt cr0so = (cr >> 28) & 1;
860 canonical->sres = VG_(mk_SysRes_ppc64_linux)( gst->guest_GPR3, cr0so );
861 canonical->what = SsComplete;
863 # elif defined(VGP_arm_linux)
864 VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
865 canonical->sres = VG_(mk_SysRes_arm_linux)( gst->guest_R0 );
866 canonical->what = SsComplete;
868 # elif defined(VGP_ppc32_aix5)
869 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
870 canonical->sres = VG_(mk_SysRes_ppc32_aix5)( gst->guest_GPR3,
872 canonical->what = SsComplete;
874 # elif defined(VGP_ppc64_aix5)
875 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
876 canonical->sres = VG_(mk_SysRes_ppc64_aix5)( gst->guest_GPR3,
878 canonical->what = SsComplete;
880 # elif defined(VGP_x86_darwin)
881 /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
882 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
883 UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);
887 switch (gst->guest_SC_CLASS) {
888 case VG_DARWIN_SYSCALL_CLASS_UNIX:
889 // int $0x80 = Unix, 64-bit result
891 wLO = gst->guest_EAX;
892 wHI = gst->guest_EDX;
894 case VG_DARWIN_SYSCALL_CLASS_MACH:
895 // int $0x81 = Mach, 32-bit result
896 wLO = gst->guest_EAX;
898 case VG_DARWIN_SYSCALL_CLASS_MDEP:
899 // int $0x82 = mdep, 32-bit result
900 wLO = gst->guest_EAX;
906 canonical->sres = VG_(mk_SysRes_x86_darwin)(
907 gst->guest_SC_CLASS, err ? True : False,
910 canonical->what = SsComplete;
912 # elif defined(VGP_amd64_darwin)
913 /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
914 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
915 ULong carry = 1 & LibVEX_GuestAMD64_get_rflags(gst);
919 switch (gst->guest_SC_CLASS) {
920 case VG_DARWIN_SYSCALL_CLASS_UNIX:
921 // syscall = Unix, 128-bit result
923 wLO = gst->guest_RAX;
924 wHI = gst->guest_RDX;
926 case VG_DARWIN_SYSCALL_CLASS_MACH:
927 // syscall = Mach, 64-bit result
928 wLO = gst->guest_RAX;
930 case VG_DARWIN_SYSCALL_CLASS_MDEP:
931 // syscall = mdep, 64-bit result
932 wLO = gst->guest_RAX;
938 canonical->sres = VG_(mk_SysRes_amd64_darwin)(
939 gst->guest_SC_CLASS, err ? True : False,
942 canonical->what = SsComplete;
944 # elif defined(VGP_x86_l4re)
945 // XXX // We need real expected statuses here to make the ifdef in lines
946 // 1803-1822 go away.
948 //VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
949 canonical->sres = VG_(mk_SysRes_x86_l4re)( 0 );
950 canonical->what = SsComplete;
952 # elif defined(VGP_s390x_linux)
953 VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
954 canonical->sres = VG_(mk_SysRes_s390x_linux)( gst->guest_r2 );
955 canonical->what = SsComplete;
958 # error "getSyscallStatusFromGuestState: unknown arch"
/* Write the canonical (platform-independent) syscall status back into
   the guest register state, using this platform's syscall return
   convention, and tell the tool (via VG_TRACK post_reg_write) which
   guest registers were written.  Counterpart of
   getSyscallStatusFromGuestState.  Requires canonical->what ==
   SsComplete (asserted in every arch branch). */
963 void putSyscallStatusIntoGuestState ( /*IN*/ ThreadId tid,
964 /*IN*/ SyscallStatus* canonical,
965 /*OUT*/VexGuestArchState* gst_vanilla )
967 # if defined(VGP_x86_linux)
968 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
969 vg_assert(canonical->what == SsComplete);
970 if (sr_isError(canonical->sres)) {
971 /* This isn't exactly right, in that really a Failure with res
972 not in the range 1 .. 4095 is unrepresentable in the
973 Linux-x86 scheme. Oh well. */
974 gst->guest_EAX = - (Int)sr_Err(canonical->sres);
976 gst->guest_EAX = sr_Res(canonical->sres);
978 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
979 OFFSET_x86_EAX, sizeof(UWord) );
981 # elif defined(VGP_amd64_linux)
982 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
983 vg_assert(canonical->what == SsComplete);
984 if (sr_isError(canonical->sres)) {
985 /* This isn't exactly right, in that really a Failure with res
986 not in the range 1 .. 4095 is unrepresentable in the
987 Linux-amd64 scheme. Oh well. */
988 gst->guest_RAX = - (Long)sr_Err(canonical->sres);
990 gst->guest_RAX = sr_Res(canonical->sres);
992 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
993 OFFSET_amd64_RAX, sizeof(UWord) );
/* ppc32/ppc64-linux: result goes in GPR3; errors are signalled by
   setting bit (1<<28) in CR — presumably CR0.SO, the usual kernel
   convention on PPC Linux; confirm against the kernel ABI. */
995 # elif defined(VGP_ppc32_linux)
996 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
997 UInt old_cr = LibVEX_GuestPPC32_get_CR(gst);
998 vg_assert(canonical->what == SsComplete);
999 if (sr_isError(canonical->sres)) {
1001 LibVEX_GuestPPC32_put_CR( old_cr | (1<<28), gst );
1002 gst->guest_GPR3 = sr_Err(canonical->sres);
1005 LibVEX_GuestPPC32_put_CR( old_cr & ~(1<<28), gst );
1006 gst->guest_GPR3 = sr_Res(canonical->sres);
1008 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1009 OFFSET_ppc32_GPR3, sizeof(UWord) );
1010 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1011 OFFSET_ppc32_CR0_0, sizeof(UChar) );
1013 # elif defined(VGP_ppc64_linux)
1014 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
1015 UInt old_cr = LibVEX_GuestPPC64_get_CR(gst);
1016 vg_assert(canonical->what == SsComplete);
1017 if (sr_isError(canonical->sres)) {
1019 LibVEX_GuestPPC64_put_CR( old_cr | (1<<28), gst );
1020 gst->guest_GPR3 = sr_Err(canonical->sres);
1023 LibVEX_GuestPPC64_put_CR( old_cr & ~(1<<28), gst );
1024 gst->guest_GPR3 = sr_Res(canonical->sres);
1026 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1027 OFFSET_ppc64_GPR3, sizeof(UWord) );
1028 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1029 OFFSET_ppc64_CR0_0, sizeof(UChar) );
1031 # elif defined(VGP_arm_linux)
1032 VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
1033 vg_assert(canonical->what == SsComplete);
1034 if (sr_isError(canonical->sres)) {
1035 /* This isn't exactly right, in that really a Failure with res
1036 not in the range 1 .. 4095 is unrepresentable in the
1037 Linux-arm scheme. Oh well. */
1038 gst->guest_R0 = - (Int)sr_Err(canonical->sres);
1040 gst->guest_R0 = sr_Res(canonical->sres);
1042 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1043 OFFSET_arm_R0, sizeof(UWord) );
/* AIX5: res and err travel in two separate registers (GPR3/GPR4),
   so the raw .res/.err fields are copied over without translation. */
1045 # elif defined(VGP_ppc32_aix5)
1046 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
1047 vg_assert(canonical->what == SsComplete);
1048 gst->guest_GPR3 = canonical->sres.res;
1049 gst->guest_GPR4 = canonical->sres.err;
1050 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1051 OFFSET_ppc32_GPR3, sizeof(UWord) );
1052 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1053 OFFSET_ppc32_GPR4, sizeof(UWord) );
1055 # elif defined(VGP_ppc64_aix5)
1056 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
1057 vg_assert(canonical->what == SsComplete);
1058 gst->guest_GPR3 = canonical->sres.res;
1059 gst->guest_GPR4 = canonical->sres.err;
1060 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1061 OFFSET_ppc64_GPR3, sizeof(UWord) );
1062 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1063 OFFSET_ppc64_GPR4, sizeof(UWord) );
/* Darwin: the return convention differs per syscall class (Mach/mdep
   vs Unix), so we must dispatch on the SysRes internal mode.  Unix
   results are two registers wide and also report error via carry. */
1065 #elif defined(VGP_x86_darwin)
1066 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
1067 SysRes sres = canonical->sres;
1068 vg_assert(canonical->what == SsComplete);
1069 /* Unfortunately here we have to break abstraction and look
1070 directly inside 'res', in order to decide what to do. */
1071 switch (sres._mode) {
1072 case SysRes_MACH: // int $0x81 = Mach, 32-bit result
1073 case SysRes_MDEP: // int $0x82 = mdep, 32-bit result
1074 gst->guest_EAX = sres._wLO;
1075 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1076 OFFSET_x86_EAX, sizeof(UInt) );
1078 case SysRes_UNIX_OK: // int $0x80 = Unix, 64-bit result
1079 case SysRes_UNIX_ERR: // int $0x80 = Unix, 64-bit error
1080 gst->guest_EAX = sres._wLO;
1081 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1082 OFFSET_x86_EAX, sizeof(UInt) );
1083 gst->guest_EDX = sres._wHI;
1084 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1085 OFFSET_x86_EDX, sizeof(UInt) );
1086 LibVEX_GuestX86_put_eflag_c( sres._mode==SysRes_UNIX_ERR ? 1 : 0,
1088 // GrP fixme sets defined for entire eflags, not just bit c
1089 // DDD: this breaks exp-ptrcheck.
1090 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1091 offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UInt) );
1098 #elif defined(VGP_amd64_darwin)
1099 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
1100 SysRes sres = canonical->sres;
1101 vg_assert(canonical->what == SsComplete);
1102 /* Unfortunately here we have to break abstraction and look
1103 directly inside 'res', in order to decide what to do. */
1104 switch (sres._mode) {
1105 case SysRes_MACH: // syscall = Mach, 64-bit result
1106 case SysRes_MDEP: // syscall = mdep, 64-bit result
1107 gst->guest_RAX = sres._wLO;
1108 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1109 OFFSET_amd64_RAX, sizeof(ULong) );
1111 case SysRes_UNIX_OK: // syscall = Unix, 128-bit result
1112 case SysRes_UNIX_ERR: // syscall = Unix, 128-bit error
1113 gst->guest_RAX = sres._wLO;
1114 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1115 OFFSET_amd64_RAX, sizeof(ULong) );
1116 gst->guest_RDX = sres._wHI;
1117 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1118 OFFSET_amd64_RDX, sizeof(ULong) );
1119 LibVEX_GuestAMD64_put_rflag_c( sres._mode==SysRes_UNIX_ERR ? 1 : 0,
1121 // GrP fixme sets defined for entire rflags, not just bit c
1122 // DDD: this breaks exp-ptrcheck.
1123 VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
1124 offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(ULong) );
1131 # elif defined(VGP_s390x_linux)
1132 VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
1133 vg_assert(canonical->what == SsComplete);
1134 if (sr_isError(canonical->sres)) {
1135 gst->guest_r2 = - (Long)sr_Err(canonical->sres);
1137 gst->guest_r2 = sr_Res(canonical->sres);
1140 # elif defined(VGP_x86_l4re)
1141 //VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
1142 vg_assert(canonical->what == SsComplete);
1145 # error "putSyscallStatusIntoGuestState: unknown arch"
1150 /* Tell me the offsets in the guest state of the syscall params, so
1151 that the scalar argument checkers don't have to have this info
1155 void getSyscallArgLayout ( /*OUT*/SyscallArgLayout* layout )
/* Fills 'layout' with the per-platform location of the syscall number
   and each argument: o_* fields are guest-state offsets (registers),
   s_* fields are stack offsets (Darwin passes some args on the stack),
   and uu_* fields mark argument slots this platform never uses. */
1157 #if defined(VGP_x86_linux)
1158 layout->o_sysno = OFFSET_x86_EAX;
1159 layout->o_arg1 = OFFSET_x86_EBX;
1160 layout->o_arg2 = OFFSET_x86_ECX;
1161 layout->o_arg3 = OFFSET_x86_EDX;
1162 layout->o_arg4 = OFFSET_x86_ESI;
1163 layout->o_arg5 = OFFSET_x86_EDI;
1164 layout->o_arg6 = OFFSET_x86_EBP;
1165 layout->uu_arg7 = -1; /* impossible value */
1166 layout->uu_arg8 = -1; /* impossible value */
1168 #elif defined(VGP_amd64_linux)
1169 layout->o_sysno = OFFSET_amd64_RAX;
1170 layout->o_arg1 = OFFSET_amd64_RDI;
1171 layout->o_arg2 = OFFSET_amd64_RSI;
1172 layout->o_arg3 = OFFSET_amd64_RDX;
1173 layout->o_arg4 = OFFSET_amd64_R10;
1174 layout->o_arg5 = OFFSET_amd64_R8;
1175 layout->o_arg6 = OFFSET_amd64_R9;
1176 layout->uu_arg7 = -1; /* impossible value */
1177 layout->uu_arg8 = -1; /* impossible value */
1179 #elif defined(VGP_ppc32_linux)
1180 layout->o_sysno = OFFSET_ppc32_GPR0;
1181 layout->o_arg1 = OFFSET_ppc32_GPR3;
1182 layout->o_arg2 = OFFSET_ppc32_GPR4;
1183 layout->o_arg3 = OFFSET_ppc32_GPR5;
1184 layout->o_arg4 = OFFSET_ppc32_GPR6;
1185 layout->o_arg5 = OFFSET_ppc32_GPR7;
1186 layout->o_arg6 = OFFSET_ppc32_GPR8;
1187 layout->uu_arg7 = -1; /* impossible value */
1188 layout->uu_arg8 = -1; /* impossible value */
1190 #elif defined(VGP_ppc64_linux)
1191 layout->o_sysno = OFFSET_ppc64_GPR0;
1192 layout->o_arg1 = OFFSET_ppc64_GPR3;
1193 layout->o_arg2 = OFFSET_ppc64_GPR4;
1194 layout->o_arg3 = OFFSET_ppc64_GPR5;
1195 layout->o_arg4 = OFFSET_ppc64_GPR6;
1196 layout->o_arg5 = OFFSET_ppc64_GPR7;
1197 layout->o_arg6 = OFFSET_ppc64_GPR8;
1198 layout->uu_arg7 = -1; /* impossible value */
1199 layout->uu_arg8 = -1; /* impossible value */
1201 #elif defined(VGP_arm_linux)
1202 layout->o_sysno = OFFSET_arm_R7;
1203 layout->o_arg1 = OFFSET_arm_R0;
1204 layout->o_arg2 = OFFSET_arm_R1;
1205 layout->o_arg3 = OFFSET_arm_R2;
1206 layout->o_arg4 = OFFSET_arm_R3;
1207 layout->o_arg5 = OFFSET_arm_R4;
1208 layout->o_arg6 = OFFSET_arm_R5;
1209 layout->uu_arg7 = -1; /* impossible value */
1210 layout->uu_arg8 = -1; /* impossible value */
1212 #elif defined(VGP_ppc32_aix5)
1213 layout->o_sysno = OFFSET_ppc32_GPR2;
1214 layout->o_arg1 = OFFSET_ppc32_GPR3;
1215 layout->o_arg2 = OFFSET_ppc32_GPR4;
1216 layout->o_arg3 = OFFSET_ppc32_GPR5;
1217 layout->o_arg4 = OFFSET_ppc32_GPR6;
1218 layout->o_arg5 = OFFSET_ppc32_GPR7;
1219 layout->o_arg6 = OFFSET_ppc32_GPR8;
1220 layout->o_arg7 = OFFSET_ppc32_GPR9;
1221 layout->o_arg8 = OFFSET_ppc32_GPR10;
1223 #elif defined(VGP_ppc64_aix5)
1224 layout->o_sysno = OFFSET_ppc64_GPR2;
1225 layout->o_arg1 = OFFSET_ppc64_GPR3;
1226 layout->o_arg2 = OFFSET_ppc64_GPR4;
1227 layout->o_arg3 = OFFSET_ppc64_GPR5;
1228 layout->o_arg4 = OFFSET_ppc64_GPR6;
1229 layout->o_arg5 = OFFSET_ppc64_GPR7;
1230 layout->o_arg6 = OFFSET_ppc64_GPR8;
1231 layout->o_arg7 = OFFSET_ppc64_GPR9;
1232 layout->o_arg8 = OFFSET_ppc64_GPR10;
1234 #elif defined(VGP_x86_darwin)
1235 layout->o_sysno = OFFSET_x86_EAX;
1236 // syscall parameters are on stack in C convention
1237 layout->s_arg1 = sizeof(UWord) * 1;
1238 layout->s_arg2 = sizeof(UWord) * 2;
1239 layout->s_arg3 = sizeof(UWord) * 3;
1240 layout->s_arg4 = sizeof(UWord) * 4;
1241 layout->s_arg5 = sizeof(UWord) * 5;
1242 layout->s_arg6 = sizeof(UWord) * 6;
1243 layout->s_arg7 = sizeof(UWord) * 7;
1244 layout->s_arg8 = sizeof(UWord) * 8;
1246 #elif defined(VGP_amd64_darwin)
1247 layout->o_sysno = OFFSET_amd64_RAX;
1248 layout->o_arg1 = OFFSET_amd64_RDI;
1249 layout->o_arg2 = OFFSET_amd64_RSI;
1250 layout->o_arg3 = OFFSET_amd64_RDX;
1251 layout->o_arg4 = OFFSET_amd64_RCX;
1252 layout->o_arg5 = OFFSET_amd64_R8;
1253 layout->o_arg6 = OFFSET_amd64_R9;
1254 layout->s_arg7 = sizeof(UWord) * 1;
1255 layout->s_arg8 = sizeof(UWord) * 2;
1257 #elif defined(VGP_x86_l4re)
1258 layout->o_sysno = OFFSET_x86_EAX;
1259 layout->o_arg1 = OFFSET_x86_EBX;
1260 layout->o_arg2 = OFFSET_x86_ECX;
1261 layout->o_arg3 = OFFSET_x86_EDX;
1262 layout->o_arg4 = OFFSET_x86_ESI;
1263 layout->o_arg5 = OFFSET_x86_EDI;
1264 layout->o_arg6 = OFFSET_x86_EBP;
1265 layout->o_arg7 = -1; /* impossible value */
1266 layout->o_arg8 = -1; /* impossible value */
1267 layout->o_retval = OFFSET_x86_EAX;
1269 #elif defined(VGP_s390x_linux)
1270 layout->o_sysno = OFFSET_s390x_SYSNO;
1271 layout->o_arg1 = OFFSET_s390x_r2;
1272 layout->o_arg2 = OFFSET_s390x_r3;
1273 layout->o_arg3 = OFFSET_s390x_r4;
1274 layout->o_arg4 = OFFSET_s390x_r5;
1275 layout->o_arg5 = OFFSET_s390x_r6;
1276 layout->o_arg6 = OFFSET_s390x_r7;
1277 layout->uu_arg7 = -1; /* impossible value */
1278 layout->uu_arg8 = -1; /* impossible value */
1280 # error "getSyscallLayout: unknown arch"
1285 /* ---------------------------------------------------------------------
1286 The main driver logic
1287 ------------------------------------------------------------------ */
1289 /* Finding the handlers for a given syscall, or faking up one
1290 when no handler is found. */
/* Dummy pre-handler used for syscalls that have no real wrapper:
   prints a warning (with backtrace at verbosity > 1), points the user
   at README_MISSING_SYSCALL_OR_IOCTL, and fails the syscall with
   ENOSYS without handing it to the kernel. */
1293 void bad_before ( ThreadId tid,
1294 SyscallArgLayout* layout,
1295 /*MOD*/SyscallArgs* args,
1296 /*OUT*/SyscallStatus* status,
1297 /*OUT*/UWord* flags )
1299 VG_(dmsg)("WARNING: unhandled syscall: %s\n",
1300 VG_SYSNUM_STRING_EXTRA(args->sysno));
1301 if (VG_(clo_verbosity) > 1) {
1302 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
1304 VG_(dmsg)("You may be able to write your own handler.\n");
1305 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
1306 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
1307 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html.\n");
1309 SET_STATUS_Failure(VKI_ENOSYS);
/* Fallback table entry for unknown syscalls: bad_before as the
   pre-handler, no post-handler. */
1312 static SyscallTableEntry bad_sys =
1313 { bad_before, NULL };
/* Look up the pre/post handler pair for 'syscallno' in the
   platform-appropriate table(s).  Never returns NULL: if no handler
   (or no pre-handler) is found, returns &bad_sys, whose pre-handler
   fails the syscall with ENOSYS. */
1315 static const SyscallTableEntry* get_syscall_entry ( Int syscallno )
1317 const SyscallTableEntry* sys = NULL;
1319 # if defined(VGO_linux)
1320 sys = ML_(get_linux_syscall_entry)( syscallno );
1322 # elif defined(VGP_ppc32_aix5)
1323 sys = ML_(get_ppc32_aix5_syscall_entry) ( syscallno );
1325 # elif defined(VGP_ppc64_aix5)
1326 sys = ML_(get_ppc64_aix5_syscall_entry) ( syscallno );
1328 # elif defined(VGO_darwin)
1329 Int idx = VG_DARWIN_SYSNO_INDEX(syscallno);
/* Darwin encodes the syscall class (Unix/Mach/mdep) in the number
   itself; each class has its own handler table. */
1331 switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
1332 case VG_DARWIN_SYSCALL_CLASS_UNIX:
1333 if (idx >= 0 && idx < ML_(syscall_table_size) &&
1334 ML_(syscall_table)[idx].before != NULL)
1335 sys = &ML_(syscall_table)[idx];
1337 case VG_DARWIN_SYSCALL_CLASS_MACH:
1338 if (idx >= 0 && idx < ML_(mach_trap_table_size) &&
1339 ML_(mach_trap_table)[idx].before != NULL)
1340 sys = &ML_(mach_trap_table)[idx];
1342 case VG_DARWIN_SYSCALL_CLASS_MDEP:
1343 if (idx >= 0 && idx < ML_(mdep_trap_table_size) &&
1344 ML_(mdep_trap_table)[idx].before != NULL)
1345 sys = &ML_(mdep_trap_table)[idx];
1352 # elif defined(VGO_l4re)
1353 if (0) VG_(message)(Vg_DebugMsg, "syscallno = %x = %d\n", syscallno, syscallno);
1355 if (syscallno < ML_(syscall_table_size) &&
1356 ML_(syscall_table)[syscallno].before != NULL)
1357 sys = &ML_(syscall_table)[syscallno];
1363 return sys == NULL ? &bad_sys : sys;
1367 /* Add and remove signals from mask so that we end up telling the
1368 kernel the state we actually want rather than what the client
1370 static void sanitize_client_sigmask(vki_sigset_t *mask)
/* Remove the unblockable signals, plus VG_SIGVGKILL, which Valgrind
   itself needs to remain deliverable at all times. */
1372 VG_(sigdelset)(mask, VKI_SIGKILL);
1373 VG_(sigdelset)(mask, VKI_SIGSTOP);
1374 VG_(sigdelset)(mask, VG_SIGVGKILL); /* never block */
1379 SyscallArgs orig_args;
1381 SyscallStatus status;
1386 SyscallInfo syscallInfo[VG_N_THREADS];
1389 /* The scheduler needs to be able to zero out these records after a
1390 fork, hence this is exported from m_syswrap. */
1391 void VG_(clear_syscallInfo) ( Int tid )
1393 vg_assert(tid >= 0 && tid < VG_N_THREADS);
1394 VG_(memset)( & syscallInfo[tid], 0, sizeof( syscallInfo[tid] ));
/* memset leaves the record all-zeroes; set .what explicitly so the
   slot unambiguously reads as idle. */
1395 syscallInfo[tid].status.what = SsIdle;
/* One-time lazy initialisation: reset every per-thread syscallInfo
   record to the idle state.  Guarded by the static init_done flag so
   repeated calls are cheap no-ops. */
1398 static void ensure_initialised ( void )
1401 static Bool init_done = False;
1405 for (i = 0; i < VG_N_THREADS; i++) {
1406 VG_(clear_syscallInfo)( i );
1410 /* --- This is the main function of this file. --- */
/* Execute a syscall on behalf of the client thread 'tid'.  'trc' is
   the thread-return code from the scheduler, forwarded to
   getSyscallArgsFromGuestState -- presumably it identifies the trap
   flavour used (see libvex_trc_values.h); confirm at the call site.
   Flow: fetch args from guest state, run the tool's and Valgrind's
   pre-handlers, then either (a) accept the pre-handler's
   success/failure verdict, or (b) hand the call to the kernel, async
   (blocking allowed) or sync, and write the result back into the
   guest state.  Ends by calling VG_(post_syscall). */
1412 void VG_(client_syscall) ( ThreadId tid, UInt trc )
1416 const SyscallTableEntry* ent;
1417 SyscallArgLayout layout;
1420 ensure_initialised();
1422 vg_assert(VG_(is_valid_tid)(tid));
1423 vg_assert(tid >= 1 && tid < VG_N_THREADS);
1424 vg_assert(VG_(is_running_thread)(tid));
1426 tst = VG_(get_ThreadState)(tid);
1428 /* BEGIN ensure root thread's stack is suitably mapped */
1429 /* In some rare circumstances, we may do the syscall without the
1430 bottom page of the stack being mapped, because the stack pointer
1431 was moved down just a few instructions before the syscall
1432 instruction, and there have been no memory references since
1433 then, that would cause a call to VG_(extend_stack) to have
1436 In native execution that's OK: the kernel automagically extends
1437 the stack's mapped area down to cover the stack pointer (or sp -
1438 redzone, really). In simulated normal execution that's OK too,
1439 since any signals we get from accessing below the mapped area of
1440 the (guest's) stack lead us to VG_(extend_stack), where we
1441 simulate the kernel's stack extension logic. But that leaves
1442 the problem of entering a syscall with the SP unmapped. Because
1443 the kernel doesn't know that the segment immediately above SP is
1444 supposed to be a grow-down segment, it causes the syscall to
1445 fail, and thereby causes a divergence between native behaviour
1446 (syscall succeeds) and simulated behaviour (syscall fails).
1448 This is quite a rare failure mode. It has only been seen
1449 affecting calls to sys_readlink on amd64-linux, and even then it
1450 requires a certain code sequence around the syscall to trigger
1453 extern int my_readlink ( const char* path );
1456 ".globl my_readlink\n"
1458 "\tsubq $0x1008,%rsp\n"
1459 "\tmovq %rdi,%rdi\n" // path is in rdi
1460 "\tmovq %rsp,%rsi\n" // &buf[0] -> rsi
1461 "\tmovl $0x1000,%edx\n" // sizeof(buf) in rdx
1462 "\tmovl $"__NR_READLINK",%eax\n" // syscall number
1464 "\taddq $0x1008,%rsp\n"
1469 For more details, see bug #156404
1470 (https://bugs.kde.org/show_bug.cgi?id=156404).
1472 The fix is actually very simple. We simply need to call
1473 VG_(extend_stack) for this thread, handing it the lowest
1474 possible valid address for stack (sp - redzone), to ensure the
1475 pages all the way down to that address, are mapped. Because
1476 this is a potentially expensive and frequent operation, we
1479 First, only the main thread (tid=1) has a growdown stack. So
1480 ignore all others. It is conceivable, although highly unlikely,
1481 that the main thread exits, and later another thread is
1482 allocated tid=1, but that's harmless, I believe;
1483 VG_(extend_stack) will do nothing when applied to a non-root
1486 Secondly, first call VG_(am_find_nsegment) directly, to see if
1487 the page holding (sp - redzone) is mapped correctly. If so, do
1488 nothing. This is almost always the case. VG_(extend_stack)
1489 calls VG_(am_find_nsegment) twice, so this optimisation -- and
1490 that's all it is -- more or less halves the number of calls to
1491 VG_(am_find_nsegment) required.
1493 TODO: the test "seg->kind == SkAnonC" is really inadequate,
1494 because although it tests whether the segment is mapped
1495 _somehow_, it doesn't check that it has the right permissions
1496 (r,w, maybe x) ? We could test that here, but it will also be
1497 necessary to fix the corresponding test in VG_(extend_stack).
1499 All this guff is of course Linux-specific. Hence the ifdef.
1501 # if defined(VGO_linux)
1502 if (tid == 1/*ROOT THREAD*/) {
1503 Addr stackMin = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
1504 NSegment const* seg = VG_(am_find_nsegment)(stackMin);
1505 if (seg && seg->kind == SkAnonC) {
1506 /* stackMin is already mapped. Nothing to do. */
1508 (void)VG_(extend_stack)( stackMin,
1509 tst->client_stack_szB );
1513 /* END ensure root thread's stack is suitably mapped */
1515 /* First off, get the syscall args and number. This is a
1516 platform-dependent action. */
1518 sci = & syscallInfo[tid];
1519 vg_assert(sci->status.what == SsIdle);
1521 getSyscallArgsFromGuestState( &sci->orig_args, &tst->arch.vex, trc );
1523 /* Copy .orig_args to .args. The pre-handler may modify .args, but
1524 we want to keep the originals too, just in case. */
1525 sci->args = sci->orig_args;
1527 /* Save the syscall number in the thread state in case the syscall
1528 is interrupted by a signal. */
1529 sysno = sci->orig_args.sysno;
1531 # if defined(VGO_darwin)
1532 /* Record syscall class. But why? Because the syscall might be
1533 interrupted by a signal, and in the signal handler (which will
1534 be m_signals.async_signalhandler) we will need to build a SysRes
1535 reflecting the syscall return result. In order to do that we
1536 need to know the syscall class. Hence stash it in the guest
1537 state of this thread. This madness is not needed on Linux or
1538 AIX5, because those OSs only have a single syscall return
1539 convention and so there is no ambiguity involved in converting
1540 the post-signal machine state into a SysRes. */
1541 tst->arch.vex.guest_SC_CLASS = VG_DARWIN_SYSNO_CLASS(sysno);
1544 /* The default what-to-do-next thing is hand the syscall to the
1545 kernel, so we pre-set that here. Set .sres to something
1546 harmless looking (is irrelevant because .what is not
1548 sci->status.what = SsHandToKernel;
1549 sci->status.sres = VG_(mk_SysRes_Error)(0);
1552 /* Fetch the syscall's handlers. If no handlers exist for this
1553 syscall, we are given dummy handlers which force an immediate
1554 return with ENOSYS. */
1555 ent = get_syscall_entry(sysno);
1557 /* Fetch the layout information, which tells us where in the guest
1558 state the syscall args reside. This is a platform-dependent
1559 action. This info is needed so that the scalar syscall argument
1560 checks (PRE_REG_READ calls) know which bits of the guest state
1561 they need to inspect. */
1562 getSyscallArgLayout( &layout );
1564 #if !defined(VGO_l4re)
1565 /* Make sure the tmp signal mask matches the real signal mask;
1566 sigsuspend may change this. */
1567 vg_assert(VG_(iseqsigset)(&tst->sig_mask, &tst->tmp_sig_mask));
1570 /* Right, we're finally ready to Party. Call the pre-handler and
1571 see what we get back. At this point:
1573 sci->status.what is Unset (we don't know yet).
1574 sci->orig_args contains the original args.
1575 sci->args is the same as sci->orig_args.
1579 PRINT("SYSCALL[%d,%d](%s) ",
1580 VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno));
1582 /* Do any pre-syscall actions */
1583 if (VG_(needs).syscall_wrapper) {
1585 tmpv[0] = sci->orig_args.arg1;
1586 tmpv[1] = sci->orig_args.arg2;
1587 tmpv[2] = sci->orig_args.arg3;
1588 tmpv[3] = sci->orig_args.arg4;
1589 tmpv[4] = sci->orig_args.arg5;
1590 tmpv[5] = sci->orig_args.arg6;
1591 tmpv[6] = sci->orig_args.arg7;
1592 tmpv[7] = sci->orig_args.arg8;
1593 VG_TDICT_CALL(tool_pre_syscall, tid, sysno,
1594 &tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]));
1598 vg_assert(ent->before);
1601 &sci->args, &sci->status, &sci->flags );
1603 /* The pre-handler may have modified:
1607 All else remains unchanged.
1608 Although the args may be modified, pre handlers are not allowed
1609 to change the syscall number.
1611 /* Now we proceed according to what the pre-handler decided. */
1612 vg_assert(sci->status.what == SsHandToKernel
1613 || sci->status.what == SsComplete);
1614 vg_assert(sci->args.sysno == sci->orig_args.sysno);
1616 if (sci->status.what == SsComplete && !sr_isError(sci->status.sres)) {
1617 /* The pre-handler completed the syscall itself, declaring
1619 if (sci->flags & SfNoWriteResult) {
1620 PRINT(" --> [pre-success] NoWriteResult");
1622 PRINT(" --> [pre-success] Success(0x%llx:0x%llx)",
1623 (ULong)sr_ResHI(sci->status.sres),
1624 (ULong)sr_Res(sci->status.sres));
1626 /* In this case the allowable flags are to ask for a signal-poll
1627 and/or a yield after the call. Changing the args isn't
1629 vg_assert(0 == (sci->flags
1630 & ~(SfPollAfter | SfYieldAfter | SfNoWriteResult)));
1631 vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
1635 if (sci->status.what == SsComplete && sr_isError(sci->status.sres)) {
1636 /* The pre-handler decided to fail syscall itself. */
1637 PRINT(" --> [pre-fail] Failure(0x%llx)", (ULong)sr_Err(sci->status.sres));
1638 /* In this case, the pre-handler is also allowed to ask for the
1639 post-handler to be run anyway. Changing the args is not
1641 vg_assert(0 == (sci->flags & ~(SfMayBlock | SfPostOnFail | SfPollAfter)));
1642 vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
1646 if (sci->status.what != SsHandToKernel) {
1651 else /* (sci->status.what == HandToKernel) */ {
1652 /* Ok, this is the usual case -- and the complicated one. There
1653 are two subcases: sync and async. async is the general case
1654 and is to be used when there is any possibility that the
1655 syscall might block [a fact that the pre-handler must tell us
1656 via the sci->flags field.] Because the tidying-away /
1657 context-switch overhead of the async case could be large, if
1658 we are sure that the syscall will not block, we fast-track it
1659 by doing it directly in this thread, which is a lot
1662 /* Check that the given flags are allowable: MayBlock, PollAfter
1663 and PostOnFail are ok. */
1664 vg_assert(0 == (sci->flags & ~(SfMayBlock | SfPostOnFail | SfPollAfter)));
1666 if (sci->flags & SfMayBlock) {
1668 /* Syscall may block, so run it asynchronously */
1669 #if defined(VGO_l4re)
1675 PRINT(" --> [async] ... \n");
1677 #if !defined(VGO_l4re)
1678 mask = tst->sig_mask;
1679 sanitize_client_sigmask(&mask);
1682 /* Gack. More impedance matching. Copy the possibly
1683 modified syscall args back into the guest state. */
1684 /* JRS 2009-Mar-16: if the syscall args are possibly modified,
1685 then this assertion is senseless:
1686 vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
1687 The case that exposed it was sys_posix_spawn on Darwin,
1688 which heavily modifies its arguments but then lets the call
1689 go through anyway, with SfToBlock set, hence we end up here. */
1690 putSyscallArgsIntoGuestState( &sci->args, &tst->arch.vex );
1692 /* Drop the bigLock */
1693 VG_(release_BigLock)(tid, VgTs_WaitSys, "VG_(client_syscall)[async]");
1694 /* Urr. We're now in a race against other threads trying to
1695 acquire the bigLock. I guess that doesn't matter provided
1696 that do_syscall_for_client only touches thread-local
1699 #if defined(VGO_l4re)
1700 // TODO should we not call do_syscall_for_client
1701 sres = VG_(do_l4re_syscall)(&sci->args, tid);
1703 sci->status = convert_SysRes_to_SyscallStatus(sres);
1704 /* After a syscall on L4, every register can be modified by the kernel.
1705 Because of this, it is important to flush these changes back to the
1707 putSyscallArgsIntoGuestState( &sci->args, &tst->arch.vex );
1708 /* Do the call, which operates directly on the guest state,
1709 not on our abstracted copies of the args/result. */
1711 do_syscall_for_client(sysno, tst, &mask);
1713 /* do_syscall_for_client may not return if the syscall was
1714 interrupted by a signal. In that case, flow of control is
1715 first to m_signals.async_sighandler, which calls
1716 VG_(fixup_guest_state_after_syscall_interrupted), which
1717 fixes up the guest state, and possibly calls
1718 VG_(post_syscall). Once that's done, control drops back
1719 to the scheduler. */
1721 /* Darwin: do_syscall_for_client may not return if the
1722 syscall was workq_ops(WQOPS_THREAD_RETURN) and the kernel
1723 responded by starting the thread at wqthread_hijack(reuse=1)
1724 (to run another workqueue item). In that case, wqthread_hijack
1725 calls ML_(wqthread_continue), which is similar to
1726 VG_(fixup_guest_state_after_syscall_interrupted). */
1729 /* Reacquire the lock */
1730 VG_(acquire_BigLock)(tid, "VG_(client_syscall)[async]");
1732 /* Even more impedance matching. Extract the syscall status
1733 from the guest state. */
1734 getSyscallStatusFromGuestState( &sci->status, &tst->arch.vex );
1735 vg_assert(sci->status.what == SsComplete);
1737 /* Be decorative, if required. */
1738 if (VG_(clo_trace_syscalls)) {
1739 Bool failed = sr_isError(sci->status.sres);
1741 PRINT("SYSCALL[%d,%d](%s) ... [async] --> Failure(0x%llx)",
1742 VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno),
1743 (ULong)sr_Err(sci->status.sres));
1745 PRINT("SYSCALL[%d,%d](%s) ... [async] --> "
1746 "Success(0x%llx:0x%llx)",
1747 VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno),
1748 (ULong)sr_ResHI(sci->status.sres),
1749 (ULong)sr_Res(sci->status.sres) );
1755 /* run the syscall directly */
1756 #if defined(VGO_l4re)
1757 SysRes sres = VG_(do_l4re_syscall)(&sci->args, tid);
1758 /* After a syscall on L4, every register can be modified by the kernel.
1759 Because of this, it is important to flush these changes back to the
1761 putSyscallArgsIntoGuestState( &sci->args, &tst->arch.vex );
1763 /* The pre-handler may have modified the syscall args, but
1764 since we're passing values in ->args directly to the
1765 kernel, there's no point in flushing them back to the
1766 guest state. Indeed doing so could be construed as
1769 = VG_(do_syscall)(sysno, sci->args.arg1, sci->args.arg2,
1770 sci->args.arg3, sci->args.arg4,
1771 sci->args.arg5, sci->args.arg6,
1772 sci->args.arg7, sci->args.arg8 );
1774 sci->status = convert_SysRes_to_SyscallStatus(sres);
1776 /* Be decorative, if required. */
1777 if (VG_(clo_trace_syscalls)) {
1778 Bool failed = sr_isError(sci->status.sres);
1780 PRINT("[sync] --> Failure(0x%llx)",
1781 (ULong)sr_Err(sci->status.sres) );
1783 PRINT("[sync] --> Success(0x%llx:0x%llx)",
1784 (ULong)sr_ResHI(sci->status.sres),
1785 (ULong)sr_Res(sci->status.sres) );
1791 vg_assert(sci->status.what == SsComplete);
1793 vg_assert(VG_(is_running_thread)(tid));
1795 /* Dump the syscall result back in the guest state. This is
1796 a platform-specific action. */
1797 if (!(sci->flags & SfNoWriteResult))
1798 putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );
1801 - the guest state is now correctly modified following the syscall
1802 - modified args, original args and syscall status are still
1803 available in the syscallInfo[] entry for this syscall.
1805 Now go on to do the post-syscall actions (read on down ..)
1808 VG_(post_syscall)(tid);
1813 /* Perform post syscall actions. The expected state on entry is
1814 precisely as at the end of VG_(client_syscall), that is:
1816 - guest state up to date following the syscall
1817 - modified args, original args and syscall status are still
1818 available in the syscallInfo[] entry for this syscall.
1819 - syscall status matches what's in the guest state.
1821 There are two ways to get here: the normal way -- being called by
1822 VG_(client_syscall), and the unusual way, from
1823 VG_(fixup_guest_state_after_syscall_interrupted).
1824 Darwin: there's a third way, ML_(wqthread_continue).
1826 void VG_(post_syscall) (ThreadId tid)
/* Runs the syscall's post-handler (if any), flushes any status change
   it makes back into the guest state, notifies the tool, marks the
   per-thread record idle again, and honours SfPollAfter/SfYieldAfter
   requests made by the wrappers. */
1829 const SyscallTableEntry* ent;
1830 SyscallStatus test_status;
1835 vg_assert(VG_(is_valid_tid)(tid));
1836 vg_assert(tid >= 1 && tid < VG_N_THREADS);
1837 vg_assert(VG_(is_running_thread)(tid));
1839 tst = VG_(get_ThreadState)(tid);
1840 sci = & syscallInfo[tid];
1842 #if defined(VGO_l4re)
1843 if (0) VG_(printf)("--> %s(tid=%d)\n", __func__, tid);
1846 /* m_signals.sigvgkill_handler might call here even when not in
1848 if (sci->status.what == SsIdle || sci->status.what == SsHandToKernel) {
1849 sci->status.what = SsIdle;
1853 /* Validate current syscallInfo entry. In particular we require
1854 that the current .status matches what's actually in the guest
1855 state. At least in the normal case where we have actually
1856 previously written the result into the guest state. */
1857 vg_assert(sci->status.what == SsComplete);
1859 #if !defined(VGO_l4re)
1860 getSyscallStatusFromGuestState( &test_status, &tst->arch.vex );
1861 if (!(sci->flags & SfNoWriteResult))
1862 vg_assert(eq_SyscallStatus( &sci->status, &test_status ));
1863 /* Failure of the above assertion on Darwin can indicate a problem
1864 in the syscall wrappers that pre-fail or pre-succeed the
1865 syscall, by calling SET_STATUS_Success or SET_STATUS_Failure,
1866 when they really should call SET_STATUS_from_SysRes. The former
1867 create a UNIX-class syscall result on Darwin, which may not be
1868 correct for the syscall; if that's the case then this assertion
1869 fires. See PRE(thread_fast_set_cthread_self) for an example. On
1870 non-Darwin platforms this assertion should never fail, and this
1871 comment is completely irrelevant. */
1872 /* Ok, looks sane */
1874 /* Get the system call number. Because the pre-handler isn't
1875 allowed to mess with it, it should be the same for both the
1876 original and potentially-modified args. */
1877 vg_assert(sci->args.sysno == sci->orig_args.sysno);
1880 sysno = sci->args.sysno;
1882 ent = get_syscall_entry(sysno);
1884 /* pre: status == Complete (asserted above) */
1885 /* Consider either success or failure. Now run the post handler if:
1887 - Success or (Failure and PostOnFail is set)
1890 && ((!sr_isError(sci->status.sres))
1891 || (sr_isError(sci->status.sres)
1892 && (sci->flags & SfPostOnFail) ))) {
1894 (ent->after)( tid, &sci->args, &sci->status );
1897 /* Because the post handler might have changed the status (eg, the
1898 post-handler for sys_open can change the result from success to
1899 failure if the kernel supplied a fd that it doesn't like), once
1900 again dump the syscall result back in the guest state.*/
1901 if (!(sci->flags & SfNoWriteResult))
1902 putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );
1904 /* Do any post-syscall actions required by the tool. */
1905 if (VG_(needs).syscall_wrapper) {
1907 tmpv[0] = sci->orig_args.arg1;
1908 tmpv[1] = sci->orig_args.arg2;
1909 tmpv[2] = sci->orig_args.arg3;
1910 tmpv[3] = sci->orig_args.arg4;
1911 tmpv[4] = sci->orig_args.arg5;
1912 tmpv[5] = sci->orig_args.arg6;
1913 tmpv[6] = sci->orig_args.arg7;
1914 tmpv[7] = sci->orig_args.arg8;
1915 VG_TDICT_CALL(tool_post_syscall, tid,
1917 &tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]),
1921 /* The syscall is done. */
1922 vg_assert(sci->status.what == SsComplete);
1923 sci->status.what = SsIdle;
1925 #if !defined(VGO_l4re)
1926 /* The pre/post wrappers may have concluded that pending signals
1927 might have been created, and will have set SfPollAfter to
1928 request a poll for them once the syscall is done. */
1929 if (sci->flags & SfPollAfter)
1930 VG_(poll_signals)(tid);
1933 /* Similarly, the wrappers might have asked for a yield
1935 if (sci->flags & SfYieldAfter)
1940 /* ---------------------------------------------------------------------
1941 Dealing with syscalls which get interrupted by a signal:
1942 VG_(fixup_guest_state_after_syscall_interrupted)
1943 ------------------------------------------------------------------ */
1945 /* Syscalls done on behalf of the client are finally handed off to the
1946 kernel in VG_(client_syscall) above, either by calling
1947 do_syscall_for_client (the async case), or by calling
1948 VG_(do_syscall6) (the sync case).
1950 If the syscall is not interrupted by a signal (it may block and
1951 later unblock, but that's irrelevant here) then those functions
1952 eventually return and so control is passed to VG_(post_syscall).
1953 NB: not sure if the sync case can actually get interrupted, as it
1954 operates with all signals masked.
1956 However, the syscall may get interrupted by an async-signal. In
1957 that case do_syscall_for_client/VG_(do_syscall6) do not
1958 return. Instead we wind up in m_signals.async_sighandler. We need
1959 to fix up the guest state to make it look like the syscall was
1960 interrupted for the guest.  So async_sighandler calls here, and this
1961 does the fixup. Note that from here we wind up calling
1962 VG_(post_syscall) too.
/* These are addresses within ML_(do_syscall_for_client_WRK).  See
   syscall-$PLAT.S for details.  Each marks a phase boundary inside the
   hand-written assembly fragment that actually hands a syscall to the
   kernel.  VG_(fixup_guest_state_after_syscall_interrupted) below
   compares the interrupted IP against these markers to decide how far
   the syscall had progressed when the signal arrived. */
#if defined(VGO_linux) || defined(VGO_aix5)
extern const Addr ML_(blksys_setup);
extern const Addr ML_(blksys_restart);
extern const Addr ML_(blksys_complete);
extern const Addr ML_(blksys_committed);
extern const Addr ML_(blksys_finished);
#elif defined(VGO_darwin)
/* Darwin requires extra ugliness: a separate marker set for each of
   the three syscall flavours handled there (MACH, MDEP, UNIX). */
extern const Addr ML_(blksys_setup_MACH);
extern const Addr ML_(blksys_restart_MACH);
extern const Addr ML_(blksys_complete_MACH);
extern const Addr ML_(blksys_committed_MACH);
extern const Addr ML_(blksys_finished_MACH);
extern const Addr ML_(blksys_setup_MDEP);
extern const Addr ML_(blksys_restart_MDEP);
extern const Addr ML_(blksys_complete_MDEP);
extern const Addr ML_(blksys_committed_MDEP);
extern const Addr ML_(blksys_finished_MDEP);
extern const Addr ML_(blksys_setup_UNIX);
extern const Addr ML_(blksys_restart_UNIX);
extern const Addr ML_(blksys_complete_UNIX);
extern const Addr ML_(blksys_committed_UNIX);
extern const Addr ML_(blksys_finished_UNIX);
#elif defined(VGO_l4re)
# error "Unknown OS"
/* Back up guest state to restart a system call: rewind the guest
   program counter so the syscall instruction is re-executed when the
   thread next runs.  Each platform branch also sanity-checks that the
   bytes at the rewound PC really are that platform's syscall
   instruction encoding. */
void ML_(fixup_guest_state_to_restart_syscall) ( ThreadArchState* arch )
#if defined(VGP_x86_linux)
   arch->vex.guest_EIP -= 2;             // sizeof(int $0x80)
   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall (int $0x80 == CD 80). */
   UChar *p = (UChar *)arch->vex.guest_EIP;
   if (p[0] != 0xcd || p[1] != 0x80)
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over syscall at %#x %02x %02x\n",
                   arch->vex.guest_EIP, p[0], p[1]);
   vg_assert(p[0] == 0xcd && p[1] == 0x80);
#elif defined(VGP_amd64_linux)
   arch->vex.guest_RIP -= 2;             // sizeof(syscall)
   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall (syscall == 0F 05). */
   UChar *p = (UChar *)arch->vex.guest_RIP;
   if (p[0] != 0x0F || p[1] != 0x05)
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over syscall at %#llx %02x %02x\n",
                   arch->vex.guest_RIP, p[0], p[1]);
   vg_assert(p[0] == 0x0F && p[1] == 0x05);
#elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
   arch->vex.guest_CIA -= 4;             // sizeof(ppc32 instr)
   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall (sc == 44 00 00 02). */
   UChar *p = (UChar *)arch->vex.guest_CIA;
   if (p[0] != 0x44 || p[1] != 0x0 || p[2] != 0x0 || p[3] != 0x02)
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
                   arch->vex.guest_CIA + 0ULL, p[0], p[1], p[2], p[3]);
   vg_assert(p[0] == 0x44 && p[1] == 0x0 && p[2] == 0x0 && p[3] == 0x2);
#elif defined(VGP_arm_linux)
   if (arch->vex.guest_R15T & 1) {
      // Thumb mode.  SVC is encoded as imm8 in the low byte,
      // where imm8 is the SVC number, and we only accept 0.
      arch->vex.guest_R15T -= 2;         // sizeof(thumb 16 bit insn)
      /* R15T carries the Thumb bit in bit 0, so subtract 1 to get the
         real instruction address before inspecting the bytes. */
      UChar* p = (UChar*)(arch->vex.guest_R15T - 1);
      Bool valid = p[0] == 0 && p[1] == 0xDF;
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over (Thumb) syscall that is not syscall "
                   "at %#llx %02x %02x\n",
                   arch->vex.guest_R15T - 1ULL, p[0], p[1]);
      // FIXME: NOTE, this really isn't right.  We need to back up
      // ITSTATE to what it was before the SVC instruction, but we
      // don't know what it was.  At least assert that it is now
      // zero, because if it is nonzero then it must also have
      // been nonzero for the SVC itself, which means it was
      // conditional.  Urk.
      vg_assert(arch->vex.guest_ITSTATE == 0);
      // ARM mode.  SVC is encoded with the SVC number in imm24,
      // where imm24 is the SVC number, and we only accept 0.
      arch->vex.guest_R15T -= 4;         // sizeof(arm instr)
      UChar* p = (UChar*)arch->vex.guest_R15T;
      /* Accept only SVC #0: low three bytes zero, and the condition/
         opcode nibble check on the top byte. */
      Bool valid = p[0] == 0 && p[1] == 0 && p[2] == 0
                   && (p[3] & 0xF) == 0xF;
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over (ARM) syscall that is not syscall "
                   "at %#llx %02x %02x %02x %02x\n",
                   arch->vex.guest_R15T + 0ULL, p[0], p[1], p[2], p[3]);
#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   /* Hmm.  This is problematic, because on AIX the kernel resumes
      after a syscall at LR, not at the insn following SC.  Hence
      there is no obvious way to figure out where the SC is.  Current
      solution is to have a pseudo-register in the guest state,
      CIA_AT_SC, which holds the address of the most recent SC
      executed.  Backing up to that syscall then simply involves
      copying that value back into CIA (the program counter). */
   arch->vex.guest_CIA = arch->vex.guest_CIA_AT_SC;
   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall (sc == 44 00 00 02). */
   UChar *p = (UChar *)arch->vex.guest_CIA;
   if (p[0] != 0x44 || p[1] != 0x0 || p[2] != 0x0 || p[3] != 0x02)
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over syscall at %#lx %02x %02x %02x %02x\n",
                   (UWord)arch->vex.guest_CIA, p[0], p[1], p[2], p[3]);
   vg_assert(p[0] == 0x44 && p[1] == 0x0 && p[2] == 0x0 && p[3] == 0x2);
#elif defined(VGP_x86_darwin)
   /* On Darwin the pre-syscall IP is stashed in a pseudo-register, so
      restarting is just copying it back. */
   arch->vex.guest_EIP = arch->vex.guest_IP_AT_SYSCALL;
   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.  Accept any of the encodings checked below:
      CD 80, CD 81, CD 82 (int $0x80/$0x81/$0x82) or 0F 34 (sysenter). */
   UChar *p = (UChar *)arch->vex.guest_EIP;
   Bool ok = (p[0] == 0xCD && p[1] == 0x80)
          || (p[0] == 0xCD && p[1] == 0x81)
          || (p[0] == 0xCD && p[1] == 0x82)
          || (p[0] == 0x0F && p[1] == 0x34);
   VG_(message)(Vg_DebugMsg,
                "?! restarting over syscall at %#x %02x %02x\n",
                arch->vex.guest_EIP, p[0], p[1]);
#elif defined(VGP_amd64_darwin)
   // DDD: #warning GrP fixme amd64 restart unimplemented
#elif defined(VGP_s390x_linux)
   arch->vex.guest_IA -= 2;              // sizeof(syscall)
   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall (first byte of svc is 0x0A). */
   UChar *p = (UChar *)arch->vex.guest_IA;
      VG_(message)(Vg_DebugMsg,
                   "?! restarting over syscall at %#llx %02x %02x\n",
                   arch->vex.guest_IA, p[0], p[1]);
   vg_assert(p[0] == 0x0A);
#elif defined(VGO_l4re)
   VG_(unimplemented)("unimplemented function ML_(fixup_guest_state_to_restart_syscall)()");
# error "ML_(fixup_guest_state_to_restart_syscall): unknown plat"
#if !defined(VGO_l4re)
/*
   Fix up the guest state when a syscall is interrupted by a signal
   and so has been forced to return 'sysret'.

   To do this, we determine the precise state of the syscall by
   looking at the (real) IP at the time the signal happened.  The
   syscall sequence looks like:

     3. save result to guest state (EAX, RAX, R3+CR0.SO)

   If a signal
   happens at     Then      Why?
   [1-2)          restart   nothing has happened (restart syscall)
   [2]            restart   syscall hasn't started, or kernel wants to restart
   [2-3)          save      syscall complete, but results not saved
   [3-4)          syscall complete, results saved

   Sometimes we never want to restart an interrupted syscall (because
   sigaction says not to), so we only restart if "restart" is True.

   This will also call VG_(post_syscall) if the syscall has actually
   completed (either because it was interrupted, or because it
   actually finished).  It will not call VG_(post_syscall) if the
   syscall is set up for restart, which means that the pre-wrapper may
   get called multiple times.
*/
VG_(fixup_guest_state_after_syscall_interrupted)( ThreadId tid,
   /* Note that we don't know the syscall number here, since (1) in
      general there's no reliable way to get hold of it short of
      stashing it in the guest state before the syscall, and (2) in
      any case we don't need to know it for the actions done by this
      routine.

      Furthermore, 'sres' is only used in the case where the syscall
      is complete, but the result has not been committed to the guest
      state yet.  In any other situation it will be meaningless and
      therefore ignored. */

   SyscallStatus canonical;
   ThreadArchState* th_regs;

   /* Compute some Booleans indicating which range we're in, by
      comparing 'ip' against the ML_(blksys_*) markers declared
      above. */
      in_setup_to_restart,      // [1,2) in the .S files
      at_restart,               // [2]   in the .S files
      in_complete_to_committed, // [3,4) in the .S files
      in_committed_to_finished; // [4,5) in the .S files

#  if defined(VGO_linux) || defined(VGO_aix5)
      = ip < ML_(blksys_setup) || ip >= ML_(blksys_finished);
      = ip >= ML_(blksys_setup) && ip < ML_(blksys_restart);
      = ip == ML_(blksys_restart);
   in_complete_to_committed
      = ip >= ML_(blksys_complete) && ip < ML_(blksys_committed);
   in_committed_to_finished
      = ip >= ML_(blksys_committed) && ip < ML_(blksys_finished);
#  elif defined(VGO_darwin)
   /* Same ranges, but each predicate must consider all three Darwin
      syscall flavours (MACH, MDEP, UNIX). */
      = (ip < ML_(blksys_setup_MACH) || ip >= ML_(blksys_finished_MACH))
     && (ip < ML_(blksys_setup_MDEP) || ip >= ML_(blksys_finished_MDEP))
     && (ip < ML_(blksys_setup_UNIX) || ip >= ML_(blksys_finished_UNIX));
      = (ip >= ML_(blksys_setup_MACH) && ip < ML_(blksys_restart_MACH))
     || (ip >= ML_(blksys_setup_MDEP) && ip < ML_(blksys_restart_MDEP))
     || (ip >= ML_(blksys_setup_UNIX) && ip < ML_(blksys_restart_UNIX));
      = (ip == ML_(blksys_restart_MACH))
     || (ip == ML_(blksys_restart_MDEP))
     || (ip == ML_(blksys_restart_UNIX));
   in_complete_to_committed
      = (ip >= ML_(blksys_complete_MACH) && ip < ML_(blksys_committed_MACH))
     || (ip >= ML_(blksys_complete_MDEP) && ip < ML_(blksys_committed_MDEP))
     || (ip >= ML_(blksys_complete_UNIX) && ip < ML_(blksys_committed_UNIX));
   in_committed_to_finished
      = (ip >= ML_(blksys_committed_MACH) && ip < ML_(blksys_finished_MACH))
     || (ip >= ML_(blksys_committed_MDEP) && ip < ML_(blksys_finished_MDEP))
     || (ip >= ML_(blksys_committed_UNIX) && ip < ML_(blksys_finished_UNIX));
   /* Wasn't that just So Much Fun?  Does your head hurt yet?  Mine does. */
#  error "Unknown OS"

   if (VG_(clo_trace_signals))
      VG_(message)( Vg_DebugMsg,
                    "interrupted_syscall: tid=%d, ip=0x%llx, "
                    "restart=%s, sres.isErr=%s, sres.val=%lld\n",
                    restart ? "True" : "False",
                    sr_isError(sres) ? "True" : "False",
                    (Long)(sr_isError(sres) ? sr_Err(sres) : sr_Res(sres)) );

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   th_regs = &tst->arch;
   sci = & syscallInfo[tid];

   /* Figure out what the state of the syscall was by examining the
      (real) IP at the time of the signal, and act accordingly. */
   if (outside_range) {
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg,
                       " not in syscall at all: hmm, very suspicious\n" );
      /* Looks like we weren't in a syscall at all.  Hmm. */
      vg_assert(sci->status.what != SsIdle);

   /* We should not be here unless this thread had first started up
      the machinery for a syscall by calling VG_(client_syscall). */
   vg_assert(sci->status.what != SsIdle);

   /* now, do one of four fixup actions, depending on where the IP has
      got to. */

   if (in_setup_to_restart) {
      /* syscall hasn't even started; go around again */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg, " not started: restarting\n");
      vg_assert(sci->status.what == SsHandToKernel);
      ML_(fixup_guest_state_to_restart_syscall)(th_regs);

   /* We're either about to run the syscall, or it was interrupted
      and the kernel restarted it.  Restart if asked, otherwise
      EINTR it. */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg, " at syscall instr: restarting\n");
      ML_(fixup_guest_state_to_restart_syscall)(th_regs);
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg, " at syscall instr: returning EINTR\n");
      /* Fake a failure result of EINTR and commit it (unless the
         wrappers asked us not to write the result back). */
      canonical = convert_SysRes_to_SyscallStatus(
                     VG_(mk_SysRes_Error)( VKI_EINTR )
      if (!(sci->flags & SfNoWriteResult))
         putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
      sci->status = canonical;
      VG_(post_syscall)(tid);

   if (in_complete_to_committed) {
      /* Syscall complete, but result hasn't been written back yet.
         Write the SysRes we were supplied with back to the guest
         state. */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg,
                       " completed, but uncommitted: committing\n");
      canonical = convert_SysRes_to_SyscallStatus( sres );
      if (!(sci->flags & SfNoWriteResult))
         putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
      sci->status = canonical;
      VG_(post_syscall)(tid);

   if (in_committed_to_finished) {
      /* Result committed, but the signal mask has not been restored;
         we expect our caller (the signal handler) will have fixed
         this up. */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg,
                       " completed and committed: nothing to do\n");
      /* Recover the (already-committed) status from the guest state. */
      getSyscallStatusFromGuestState( &sci->status, &th_regs->vex );
      vg_assert(sci->status.what == SsComplete);
      VG_(post_syscall)(tid);

   VG_(core_panic)("?? strange syscall interrupt state?");

   /* In all cases, the syscall is now finished (even if we called
      ML_(fixup_guest_state_to_restart_syscall), since that just
      re-positions the guest's IP for another go at it).  So we need
      to record that fact. */
   sci->status.what = SsIdle;
#if defined(VGO_darwin)
// Clean up after workq_ops(WQOPS_THREAD_RETURN) jumped to wqthread_hijack.
// This is similar to VG_(fixup_guest_state_after_syscall_interrupted).
// This longjmps back to the scheduler.
void ML_(wqthread_continue_NORETURN)(ThreadId tid)
   VG_(acquire_BigLock)(tid, "wqthread_continue_NORETURN");

   PRINT("SYSCALL[%d,%d](%s) workq_ops() starting new workqueue item\n",
         VG_(getpid)(), tid, VG_SYSNUM_STRING(__NR_workq_ops));

   /* Sanity checks: tid must be valid and must be the thread that is
      currently running on this host thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   sci = & syscallInfo[tid];
   vg_assert(sci->status.what != SsIdle);
   vg_assert(tst->os_state.wq_jmpbuf_valid);  // check this BEFORE post_syscall

   // Pretend the syscall completed normally, but don't touch the thread state.
   sci->status = convert_SysRes_to_SyscallStatus( VG_(mk_SysRes_Success)(0) );
   sci->flags |= SfNoWriteResult;   // suppress writing the fake result back
   VG_(post_syscall)(tid);

   /* Syscall bookkeeping is done; mark the slot idle before leaving. */
   sci->status.what = SsIdle;

   /* Jump back into the scheduler loop; this function never returns
      to its caller. */
   vg_assert(tst->sched_jmpbuf_valid);
   VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
2423 /* ---------------------------------------------------------------------
2424 A place to store the where-to-call-when-really-done pointer
2425 ------------------------------------------------------------------ */
2427 // When the final thread is done, where shall I call to shutdown the
2428 // system cleanly? Is set once at startup (in m_main) and never
2429 // changes after that. Is basically a pointer to the exit
2430 // continuation. This is all just a nasty hack to avoid calling
2431 // directly from m_syswrap to m_main at exit, since that would cause
2432 // m_main to become part of a module cycle, which is silly.
2433 void (* VG_(address_of_m_main_shutdown_actions_NORETURN) )
2434 (ThreadId,VgSchedReturnCode)
2437 /*--------------------------------------------------------------------*/
2439 /*--------------------------------------------------------------------*/