2 /*--------------------------------------------------------------------*/
3 /*--- Machine-related stuff. m_machine.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2000-2010 Julian Seward
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 The GNU General Public License is contained in the file COPYING.
31 #include "pub_core_basics.h"
32 #include "pub_core_vki.h"
33 #include "pub_core_threadstate.h"
34 #include "pub_core_libcassert.h"
35 #include "pub_core_libcbase.h"
36 #include "pub_core_machine.h"
37 #include "pub_core_cpuid.h"
38 #include "pub_core_libcsignal.h" // for ppc32 messing with SIGILL and SIGFPE
39 #include "pub_core_debuglog.h"
/* Convenience accessors for the guest instruction pointer, stack
   pointer and frame pointer slots of a ThreadArchState.  The
   VG_INSTR_PTR / VG_STACK_PTR / VG_FRAME_PTR names are per-arch
   guest-state field names supplied by the core headers, so these
   macros work unchanged on every supported architecture. */
#define INSTR_PTR(regs) ((regs).vex.VG_INSTR_PTR)
#define STACK_PTR(regs) ((regs).vex.VG_STACK_PTR)
#define FRAME_PTR(regs) ((regs).vex.VG_FRAME_PTR)
46 Addr VG_(get_SP) ( ThreadId tid )
48 return STACK_PTR( VG_(threads)[tid].arch );
51 Addr VG_(get_IP) ( ThreadId tid )
53 return INSTR_PTR( VG_(threads)[tid].arch );
56 Addr VG_(get_FP) ( ThreadId tid )
58 return FRAME_PTR( VG_(threads)[tid].arch );
61 Addr VG_(get_LR) ( ThreadId tid )
63 # if defined(VGA_ppc32) || defined(VGA_ppc64)
64 return VG_(threads)[tid].arch.vex.guest_LR;
65 # elif defined(VGA_x86) || defined(VGA_amd64)
67 # elif defined(VGA_arm)
68 return VG_(threads)[tid].arch.vex.guest_R14;
70 # error "Unknown arch"
74 void VG_(set_SP) ( ThreadId tid, Addr sp )
76 STACK_PTR( VG_(threads)[tid].arch ) = sp;
79 void VG_(set_IP) ( ThreadId tid, Addr ip )
81 INSTR_PTR( VG_(threads)[tid].arch ) = ip;
85 void VG_(get_UnwindStartRegs) ( /*OUT*/UnwindStartRegs* regs,
89 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_EIP;
90 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_ESP;
92 = VG_(threads)[tid].arch.vex.guest_EBP;
93 # elif defined(VGA_amd64)
94 regs->r_pc = VG_(threads)[tid].arch.vex.guest_RIP;
95 regs->r_sp = VG_(threads)[tid].arch.vex.guest_RSP;
96 regs->misc.AMD64.r_rbp
97 = VG_(threads)[tid].arch.vex.guest_RBP;
98 # elif defined(VGA_ppc32)
99 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_CIA;
100 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_GPR1;
101 regs->misc.PPC32.r_lr
102 = VG_(threads)[tid].arch.vex.guest_LR;
103 # elif defined(VGA_ppc64)
104 regs->r_pc = VG_(threads)[tid].arch.vex.guest_CIA;
105 regs->r_sp = VG_(threads)[tid].arch.vex.guest_GPR1;
106 regs->misc.PPC64.r_lr
107 = VG_(threads)[tid].arch.vex.guest_LR;
108 # elif defined(VGA_arm)
109 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_R15;
110 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_R13;
112 = VG_(threads)[tid].arch.vex.guest_R14;
114 = VG_(threads)[tid].arch.vex.guest_R12;
116 = VG_(threads)[tid].arch.vex.guest_R11;
118 # error "Unknown arch"
123 void VG_(set_syscall_return_shadows) ( ThreadId tid,
124 /* shadow vals for the result */
125 UWord s1res, UWord s2res,
126 /* shadow vals for the error val */
127 UWord s1err, UWord s2err )
129 # if defined(VGP_x86_linux) || defined (VGP_x86_l4re)
130 VG_(threads)[tid].arch.vex_shadow1.guest_EAX = s1res;
131 VG_(threads)[tid].arch.vex_shadow2.guest_EAX = s2res;
132 # elif defined(VGP_amd64_linux)
133 VG_(threads)[tid].arch.vex_shadow1.guest_RAX = s1res;
134 VG_(threads)[tid].arch.vex_shadow2.guest_RAX = s2res;
135 # elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
136 VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
137 VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
138 # elif defined(VGP_arm_linux)
139 VG_(threads)[tid].arch.vex_shadow1.guest_R0 = s1res;
140 VG_(threads)[tid].arch.vex_shadow2.guest_R0 = s2res;
141 # elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
142 VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
143 VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
144 VG_(threads)[tid].arch.vex_shadow1.guest_GPR4 = s1err;
145 VG_(threads)[tid].arch.vex_shadow2.guest_GPR4 = s2err;
146 # elif defined(VGO_darwin)
147 // GrP fixme darwin syscalls may return more values (2 registers plus error)
149 # error "Unknown plat"
154 VG_(get_shadow_regs_area) ( ThreadId tid,
156 /*SRC*/Int shadowNo, PtrdiffT offset, SizeT size )
160 vg_assert(shadowNo == 0 || shadowNo == 1 || shadowNo == 2);
161 vg_assert(VG_(is_valid_tid)(tid));
163 vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
164 vg_assert(offset + size <= sizeof(VexGuestArchState));
166 tst = & VG_(threads)[tid];
169 case 0: src = (void*)(((Addr)&(tst->arch.vex)) + offset); break;
170 case 1: src = (void*)(((Addr)&(tst->arch.vex_shadow1)) + offset); break;
171 case 2: src = (void*)(((Addr)&(tst->arch.vex_shadow2)) + offset); break;
173 tl_assert(src != NULL);
174 VG_(memcpy)( dst, src, size);
178 VG_(set_shadow_regs_area) ( ThreadId tid,
179 /*DST*/Int shadowNo, PtrdiffT offset, SizeT size,
180 /*SRC*/const UChar* src )
184 vg_assert(shadowNo == 0 || shadowNo == 1 || shadowNo == 2);
185 vg_assert(VG_(is_valid_tid)(tid));
187 vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
188 vg_assert(offset + size <= sizeof(VexGuestArchState));
190 tst = & VG_(threads)[tid];
193 case 0: dst = (void*)(((Addr)&(tst->arch.vex)) + offset); break;
194 case 1: dst = (void*)(((Addr)&(tst->arch.vex_shadow1)) + offset); break;
195 case 2: dst = (void*)(((Addr)&(tst->arch.vex_shadow2)) + offset); break;
197 tl_assert(dst != NULL);
198 VG_(memcpy)( dst, src, size);
202 static void apply_to_GPs_of_tid(VexGuestArchState* vex, void (*f)(Addr))
205 (*f)(vex->guest_EAX);
206 (*f)(vex->guest_ECX);
207 (*f)(vex->guest_EDX);
208 (*f)(vex->guest_EBX);
209 (*f)(vex->guest_ESI);
210 (*f)(vex->guest_EDI);
211 (*f)(vex->guest_ESP);
212 (*f)(vex->guest_EBP);
213 #elif defined(VGA_amd64)
214 (*f)(vex->guest_RAX);
215 (*f)(vex->guest_RCX);
216 (*f)(vex->guest_RDX);
217 (*f)(vex->guest_RBX);
218 (*f)(vex->guest_RSI);
219 (*f)(vex->guest_RDI);
220 (*f)(vex->guest_RSP);
221 (*f)(vex->guest_RBP);
224 (*f)(vex->guest_R10);
225 (*f)(vex->guest_R11);
226 (*f)(vex->guest_R12);
227 (*f)(vex->guest_R13);
228 (*f)(vex->guest_R14);
229 (*f)(vex->guest_R15);
230 #elif defined(VGA_ppc32) || defined(VGA_ppc64)
231 (*f)(vex->guest_GPR0);
232 (*f)(vex->guest_GPR1);
233 (*f)(vex->guest_GPR2);
234 (*f)(vex->guest_GPR3);
235 (*f)(vex->guest_GPR4);
236 (*f)(vex->guest_GPR5);
237 (*f)(vex->guest_GPR6);
238 (*f)(vex->guest_GPR7);
239 (*f)(vex->guest_GPR8);
240 (*f)(vex->guest_GPR9);
241 (*f)(vex->guest_GPR10);
242 (*f)(vex->guest_GPR11);
243 (*f)(vex->guest_GPR12);
244 (*f)(vex->guest_GPR13);
245 (*f)(vex->guest_GPR14);
246 (*f)(vex->guest_GPR15);
247 (*f)(vex->guest_GPR16);
248 (*f)(vex->guest_GPR17);
249 (*f)(vex->guest_GPR18);
250 (*f)(vex->guest_GPR19);
251 (*f)(vex->guest_GPR20);
252 (*f)(vex->guest_GPR21);
253 (*f)(vex->guest_GPR22);
254 (*f)(vex->guest_GPR23);
255 (*f)(vex->guest_GPR24);
256 (*f)(vex->guest_GPR25);
257 (*f)(vex->guest_GPR26);
258 (*f)(vex->guest_GPR27);
259 (*f)(vex->guest_GPR28);
260 (*f)(vex->guest_GPR29);
261 (*f)(vex->guest_GPR30);
262 (*f)(vex->guest_GPR31);
263 (*f)(vex->guest_CTR);
265 #elif defined(VGA_arm)
275 (*f)(vex->guest_R10);
276 (*f)(vex->guest_R11);
277 (*f)(vex->guest_R12);
278 (*f)(vex->guest_R13);
279 (*f)(vex->guest_R14);
286 void VG_(apply_to_GP_regs)(void (*f)(UWord))
290 for (tid = 1; tid < VG_N_THREADS; tid++) {
291 if (VG_(is_valid_tid)(tid)) {
292 ThreadState* tst = VG_(get_ThreadState)(tid);
293 apply_to_GPs_of_tid(&(tst->arch.vex), f);
298 void VG_(thread_stack_reset_iter)(/*OUT*/ThreadId* tid)
300 *tid = (ThreadId)(-1);
303 Bool VG_(thread_stack_next)(/*MOD*/ThreadId* tid,
304 /*OUT*/Addr* stack_min,
305 /*OUT*/Addr* stack_max)
308 for (i = (*tid)+1; i < VG_N_THREADS; i++) {
309 if (i == VG_INVALID_THREADID)
311 if (VG_(threads)[i].status != VgTs_Empty) {
313 *stack_min = VG_(get_SP)(i);
314 *stack_max = VG_(threads)[i].client_stack_highest_word;
321 Addr VG_(thread_get_stack_max)(ThreadId tid)
323 vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
324 vg_assert(VG_(threads)[tid].status != VgTs_Empty);
325 return VG_(threads)[tid].client_stack_highest_word;
328 SizeT VG_(thread_get_stack_size)(ThreadId tid)
330 vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
331 vg_assert(VG_(threads)[tid].status != VgTs_Empty);
332 return VG_(threads)[tid].client_stack_szB;
335 //-------------------------------------------------------------
336 /* Details about the capabilities of the underlying (host) CPU. These
337 details are acquired by (1) enquiring with the CPU at startup, or
338 (2) from the AT_SYSINFO entries the kernel gave us (ppc32 cache
339 line size). It's a bit nasty in the sense that there's no obvious
340 way to stop uses of some of this info before it's ready to go.
342 Current dependencies are:
344 x86: initially: call VG_(machine_get_hwcaps)
346 then safe to use VG_(machine_get_VexArchInfo)
347 and VG_(machine_x86_have_mxcsr)
349 amd64: initially: call VG_(machine_get_hwcaps)
351 then safe to use VG_(machine_get_VexArchInfo)
353 ppc32: initially: call VG_(machine_get_hwcaps)
354 call VG_(machine_ppc32_set_clszB)
356 then safe to use VG_(machine_get_VexArchInfo)
357 and VG_(machine_ppc32_has_FP)
358 and VG_(machine_ppc32_has_VMX)
360 ppc64: initially: call VG_(machine_get_hwcaps)
361 call VG_(machine_ppc64_set_clszB)
363 then safe to use VG_(machine_get_VexArchInfo)
364 and VG_(machine_ppc64_has_VMX)
366 VG_(machine_get_hwcaps) may use signals (although it attempts to
367 leave signal state unchanged) and therefore should only be
368 called before m_main sets up the client's signal state.
/* --------- State --------- */
/* True once VG_(machine_get_hwcaps) has completed; most accessors
   below assert on it. */
static Bool hwcaps_done = False;

/* --- all archs --- */
/* Host architecture info, filled in by VG_(machine_get_hwcaps) and
   VG_(machine_ppc{32,64}_set_clszB). */
static VexArchInfo vai;

/* 1 iff the host x86 has MXCSR, i.e. any SSE support; set by
   VG_(machine_get_hwcaps).
   NOTE(review): upstream guards this with #if defined(VGA_x86); that
   guard appears to have been lost from this copy -- confirm. */
UInt VG_(machine_x86_have_mxcsr) = 0;

#if defined(VGA_ppc32)
/* Nonzero iff the host ppc32 has FP / Altivec respectively; set by
   VG_(machine_get_hwcaps). */
UInt VG_(machine_ppc32_has_FP) = 0;
UInt VG_(machine_ppc32_has_VMX) = 0;

/* NOTE(review): the #endif closing the VGA_ppc32 block above, and the
   one closing the VGA_ppc64 block below, appear to have been lost
   from this copy -- confirm against the upstream file. */
#if defined(VGA_ppc64)
/* Nonzero iff the host ppc64 has Altivec; set by
   VG_(machine_get_hwcaps). */
ULong VG_(machine_ppc64_has_VMX) = 0;
/* Determine what insn set and insn set variant the host has, and
   record it. To be called once at system startup. Returns False if
   this a CPU incapable of running Valgrind. */

#if defined(VGA_ppc32) || defined(VGA_ppc64)
#include <setjmp.h> // For jmp_buf
/* Jump target for the instruction-probing below: a probe that raises
   SIGILL/SIGFPE longjmps back to the corresponding setjmp site. */
static jmp_buf env_unsup_insn;
/* Handler installed on SIGILL/SIGFPE while probing for optional ppc
   instructions; never returns normally.
   NOTE(review): the #endif closing this #if block appears to have
   been lost from this copy -- confirm against the upstream file. */
static void handler_unsup_insn ( Int x ) { __builtin_longjmp(env_unsup_insn,1); }
/* Determine which instruction-set extensions the host CPU supports
   and record them in 'vai' (plus the VG_(machine_*) globals above).
   x86/amd64 probe via CPUID (SSE1/SSE2/SSE3 and cmpxchg8b/16b bits);
   ppc32/ppc64 probe by executing candidate instructions under
   temporary SIGILL/SIGFPE handlers that longjmp out on a fault.
   NOTE(review): this copy of the function has lost many interior
   lines -- the leading "#if defined(VGA_x86)", several "return
   False;" / "return True;" branches, closing braces, "#endif"s, the
   "Int r;" declarations used by the sigaction/sigprocmask calls, the
   "have_* = True/False" probe bodies, and the VGA_arm section.  The
   comments below describe only the visible code; reconcile against
   the upstream file before building. */
Bool VG_(machine_get_hwcaps)( void )
   /* Must only be called once. */
   vg_assert(hwcaps_done == False);

   // Whack default settings into vai, so that we only need to fill in
   // any interesting bits.
   LibVEX_default_VexArchInfo(&vai);

   /* ----- x86: CPUID-based detection ----- */
   { Bool have_sse1, have_sse2, have_cx8;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all. Give up. */

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
        /* we can't ask for cpuid(x) for x > 0. Give up. */

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);
     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */

     /* cmpxchg8b is a minimum requirement now; if we don't have it we
        must simply give up. But all CPUs since Pentium-I have it, so
        that doesn't seem like much of a restriction. */
     have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */

     /* Pick the strongest hwcaps baseline the CPU supports. */
     if (have_sse2 && have_sse1) {
        vai.hwcaps = VEX_HWCAPS_X86_SSE1;
        vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
        VG_(machine_x86_have_mxcsr) = 1;
        vai.hwcaps = VEX_HWCAPS_X86_SSE1;
        VG_(machine_x86_have_mxcsr) = 1;
        vai.hwcaps = 0; /*baseline - no sse at all*/
        VG_(machine_x86_have_mxcsr) = 0;

#elif defined(VGA_amd64)
   /* ----- amd64: CPUID-based detection ----- */
   { Bool have_sse1, have_sse2, have_sse3, have_cx8, have_cx16;
     UInt eax, ebx, ecx, edx;

     if (!VG_(has_cpuid)())
        /* we can't do cpuid at all. Give up. */

     VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
        /* we can't ask for cpuid(x) for x > 0. Give up. */

     /* get capabilities bits into edx */
     VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);
     have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
     have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */
     have_sse3 = (ecx & (1<<0)) != 0; /* True => have sse3 insns */

     /* cmpxchg8b is a minimum requirement now; if we don't have it we
        must simply give up. But all CPUs since Pentium-I have it, so
        that doesn't seem like much of a restriction. */
     have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */

     /* on amd64 we tolerate older cpus, which don't have cmpxchg16b */
     have_cx16 = (ecx & (1<<13)) != 0; /* True => have cmpxchg16b */

     vai.hwcaps = (have_sse3 ? VEX_HWCAPS_AMD64_SSE3 : 0)
                | (have_cx16 ? VEX_HWCAPS_AMD64_CX16 : 0);

#elif defined(VGA_ppc32)
   /* Find out which subset of the ppc32 instruction set is supported by
      verifying whether various ppc32 instructions generate a SIGILL
      or a SIGFPE. An alternative approach is to check the AT_HWCAP and
      AT_PLATFORM entries in the ELF auxiliary table -- see also
      the_iifii.client_auxv in m_main.c. */
   vki_sigset_t saved_set, tmp_set;
   vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
   vki_sigaction_toK_t tmp_sigill_act, tmp_sigfpe_act;

   /* volatile: written between setjmp and longjmp. */
   volatile Bool have_F, have_V, have_FX, have_GX;

   /* This is a kludge. Really we ought to back-convert saved_act
      into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
      since that's a no-op on all ppc32 platforms so far supported,
      it's not worth the typing effort. At least include most basic
      sanity checks. */
   vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));

   /* Unblock SIGILL/SIGFPE and save the current handlers. */
   VG_(sigemptyset)(&tmp_set);
   VG_(sigaddset)(&tmp_set, VKI_SIGILL);
   VG_(sigaddset)(&tmp_set, VKI_SIGFPE);

   r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);

   r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
   tmp_sigill_act = saved_sigill_act;

   r = VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
   tmp_sigfpe_act = saved_sigfpe_act;

   /* NODEFER: signal handler does not return (from the kernel's point of
      view), hence if it is to successfully catch a signal more than once,
      we need the NODEFER flag. */
   tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
   tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
   tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
   tmp_sigill_act.ksa_handler = handler_unsup_insn;
   r = VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);

   tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
   tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
   tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
   tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
   r = VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);

   /* standard FP insns */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__(".long 0xFC000090"); /*fmr 0,0 */

   /* Altivec (VMX) */
   if (__builtin_setjmp(env_unsup_insn)) {
   /* Unfortunately some older assemblers don't speak Altivec (or
      choose not to), so to be safe we directly emit the 32-bit
      word corresponding to "vor 0,0,0". This fixes a build
      problem that happens on Debian 3.1 (ppc32), and probably
      various other places. */
   __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/

   /* General-Purpose optional (fsqrt, fsqrts) */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0 */

   /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__(".long 0xFC000034"); /* frsqrte 0,0 */

   /* Restore the original signal state. */
   r = VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
   r = VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
   r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
   VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d\n",
                 (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
   /* Make FP a prerequisite for VMX (bogusly so), and for FX and GX. */
   if (have_V && !have_F)
   if (have_FX && !have_F)
   if (have_GX && !have_F)

   VG_(machine_ppc32_has_FP) = have_F ? 1 : 0;
   VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;

   if (have_F) vai.hwcaps |= VEX_HWCAPS_PPC32_F;
   if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC32_V;
   if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC32_FX;
   if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC32_GX;

   /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
      called before we're ready to go. */

#elif defined(VGA_ppc64)
   /* Same instruction set detection algorithm as for ppc32. */
   vki_sigset_t saved_set, tmp_set;
   vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
   vki_sigaction_toK_t tmp_sigill_act, tmp_sigfpe_act;

   /* volatile: written between setjmp and longjmp. */
   volatile Bool have_F, have_V, have_FX, have_GX;

   /* This is a kludge. Really we ought to back-convert saved_act
      into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
      since that's a no-op on all ppc64 platforms so far supported,
      it's not worth the typing effort. At least include most basic
      sanity checks. */
   vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));

   /* Unblock SIGILL/SIGFPE and save the current handlers. */
   VG_(sigemptyset)(&tmp_set);
   VG_(sigaddset)(&tmp_set, VKI_SIGILL);
   VG_(sigaddset)(&tmp_set, VKI_SIGFPE);

   r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);

   r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
   tmp_sigill_act = saved_sigill_act;

   VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
   tmp_sigfpe_act = saved_sigfpe_act;

   /* NODEFER: signal handler does not return (from the kernel's point of
      view), hence if it is to successfully catch a signal more than once,
      we need the NODEFER flag. */
   tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
   tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
   tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
   tmp_sigill_act.ksa_handler = handler_unsup_insn;
   VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);

   tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
   tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
   tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
   tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
   VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);

   /* standard FP insns */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__("fmr 0,0");

   /* Altivec (VMX) */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/

   /* General-Purpose optional (fsqrt, fsqrts) */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/

   /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
   if (__builtin_setjmp(env_unsup_insn)) {
   __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/

   /* Restore the original signal state. */
   VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
   VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
   VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d\n",
                 (Int)have_F, (Int)have_V, (Int)have_FX, (Int)have_GX);
   /* on ppc64, if we don't even have FP, just give up. */

   VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;

   if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC64_V;
   if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC64_FX;
   if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC64_GX;

   /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
      called before we're ready to go. */

#elif defined(VGA_arm)
/* NOTE(review): the arm detection body, the #else, and the closing
   #endif/brace appear to have been lost from this copy. */
# error "Unknown arch"
/* Notify host cpu cache line size. */
#if defined(VGA_ppc32)
/* Record the host's data cache line size (32, 64 or 128 bytes) in
   vai.  Must be called after VG_(machine_get_hwcaps); may be called
   repeatedly with the same value.
   (Braces and the closing #endif had been lost from this copy;
   restored.) */
void VG_(machine_ppc32_set_clszB)( Int szB )
{
   vg_assert(hwcaps_done);

   /* Either the value must not have been set yet (zero) or we can
      tolerate it being set to the same value multiple times, as the
      stack scanning logic in m_main is a bit stupid. */
   vg_assert(vai.ppc_cache_line_szB == 0
             || vai.ppc_cache_line_szB == szB);

   vg_assert(szB == 32 || szB == 64 || szB == 128);
   vai.ppc_cache_line_szB = szB;
}
#endif
/* Notify host cpu cache line size. */
#if defined(VGA_ppc64)
/* Record the host's data cache line size (32, 64 or 128 bytes) in
   vai.  Must be called after VG_(machine_get_hwcaps); may be called
   repeatedly with the same value.
   (Braces and the closing #endif had been lost from this copy;
   restored.) */
void VG_(machine_ppc64_set_clszB)( Int szB )
{
   vg_assert(hwcaps_done);

   /* Either the value must not have been set yet (zero) or we can
      tolerate it being set to the same value multiple times, as the
      stack scanning logic in m_main is a bit stupid. */
   vg_assert(vai.ppc_cache_line_szB == 0
             || vai.ppc_cache_line_szB == szB);

   vg_assert(szB == 32 || szB == 64 || szB == 128);
   vai.ppc_cache_line_szB = szB;
}
#endif
/* Fetch host cpu info, once established. */
/* Copy the recorded host VexArchInfo into *pVai (if non-NULL).  Only
   valid after VG_(machine_get_hwcaps) has run, hence the assert.
   NOTE(review): the function's braces, and the upstream code that
   writes *pVa from a file-static VexArch, appear to have been lost
   from this copy -- as shown, pVa is accepted but never written.
   Reconcile against the upstream file. */
void VG_(machine_get_VexArchInfo)( /*OUT*/VexArch* pVa,
                                   /*OUT*/VexArchInfo* pVai )
   vg_assert(hwcaps_done);
   if (pVai) *pVai = vai;
774 // Given a pointer to a function as obtained by "& functionname" in C,
775 // produce a pointer to the actual entry point for the function.
776 void* VG_(fnptr_to_fnentry)( void* f )
778 #if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
779 || defined(VGP_arm_linux) \
780 || defined(VGP_ppc32_linux) || defined(VGO_darwin) \
783 #elif defined(VGP_ppc64_linux) || defined(VGP_ppc32_aix5) \
784 || defined(VGP_ppc64_aix5)
785 /* All other ppc variants use the AIX scheme, in which f is a
786 pointer to a 3-word function descriptor, of which the first word
787 is the entry address. */
788 UWord* descr = (UWord*)f;
789 return (void*)(descr[0]);
791 # error "Unknown platform"
795 /*--------------------------------------------------------------------*/
797 /*--------------------------------------------------------------------*/