1 /*--------------------------------------------------------------------*/
2 /*--- Machine-related stuff. m_machine.c ---*/
3 /*--------------------------------------------------------------------*/
6 This file is part of Valgrind, a dynamic binary instrumentation
9 Copyright (C) 2000-2010 Julian Seward
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 The GNU General Public License is contained in the file COPYING.
30 #include "pub_core_basics.h"
31 #include "pub_core_vki.h"
32 #include "pub_core_libcsetjmp.h" // setjmp facilities
33 #include "pub_core_threadstate.h"
34 #include "pub_core_libcassert.h"
35 #include "pub_core_libcbase.h"
36 #include "pub_core_libcfile.h"
37 #include "pub_core_mallocfree.h"
38 #include "pub_core_machine.h"
39 #include "pub_core_cpuid.h"
40 #include "pub_core_libcsignal.h" // for ppc32 messing with SIGILL and SIGFPE
41 #include "pub_core_debuglog.h"
44 #define INSTR_PTR(regs) ((regs).vex.VG_INSTR_PTR)
45 #define STACK_PTR(regs) ((regs).vex.VG_STACK_PTR)
46 #define FRAME_PTR(regs) ((regs).vex.VG_FRAME_PTR)
/* Read the guest program counter, stack pointer and frame pointer of
   thread 'tid'.  These go through the arch-neutral INSTR_PTR /
   STACK_PTR / FRAME_PTR macros defined above, and operate on the
   baseline (non-shadow) guest state in VG_(threads)[tid].arch.
   NOTE(review): the closing braces of these functions are not visible
   in this elided listing. */
48 Addr VG_(get_IP) ( ThreadId tid ) {
49 return INSTR_PTR( VG_(threads)[tid].arch );
51 Addr VG_(get_SP) ( ThreadId tid ) {
52 return STACK_PTR( VG_(threads)[tid].arch );
54 Addr VG_(get_FP) ( ThreadId tid ) {
55 return FRAME_PTR( VG_(threads)[tid].arch );
/* Corresponding writers for the program counter and stack pointer. */
58 void VG_(set_IP) ( ThreadId tid, Addr ip ) {
59 INSTR_PTR( VG_(threads)[tid].arch ) = ip;
61 void VG_(set_SP) ( ThreadId tid, Addr sp ) {
62 STACK_PTR( VG_(threads)[tid].arch ) = sp;
/* Fill *regs with the registers needed to begin a stack unwind for
   thread 'tid': the PC, the SP, and per-architecture extras (frame
   pointer and/or link register and, on ARM, several candidate frame
   registers).  Architecture selection is by VGA_* preprocessor tests.
   NOTE(review): the leading '#if defined(VGA_x86)' guard and several
   'regs->misc.*' lvalue lines are not visible in this elided listing;
   the first branch below is the x86 case (guest_EIP/ESP/EBP). */
65 void VG_(get_UnwindStartRegs) ( /*OUT*/UnwindStartRegs* regs,
69 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_EIP;
70 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_ESP;
72 = VG_(threads)[tid].arch.vex.guest_EBP;
73 # elif defined(VGA_amd64)
74 regs->r_pc = VG_(threads)[tid].arch.vex.guest_RIP;
75 regs->r_sp = VG_(threads)[tid].arch.vex.guest_RSP;
76 regs->misc.AMD64.r_rbp
77 = VG_(threads)[tid].arch.vex.guest_RBP;
78 # elif defined(VGA_ppc32)
/* ppc uses GPR1 as the stack pointer; the link register is the extra
   unwind seed. */
79 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_CIA;
80 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_GPR1;
82 = VG_(threads)[tid].arch.vex.guest_LR;
83 # elif defined(VGA_ppc64)
84 regs->r_pc = VG_(threads)[tid].arch.vex.guest_CIA;
85 regs->r_sp = VG_(threads)[tid].arch.vex.guest_GPR1;
87 = VG_(threads)[tid].arch.vex.guest_LR;
88 # elif defined(VGA_arm)
/* R15T is the (Thumb-bit-tagged) program counter, R13 the stack
   pointer; R14/R12/R11/R7 are all captured since the frame register
   varies with ABI/compiler. */
89 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_R15T;
90 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_R13;
92 = VG_(threads)[tid].arch.vex.guest_R14;
94 = VG_(threads)[tid].arch.vex.guest_R12;
96 = VG_(threads)[tid].arch.vex.guest_R11;
98 = VG_(threads)[tid].arch.vex.guest_R7;
99 # elif defined(VGA_s390x)
100 regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_IA;
101 regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_SP;
102 regs->misc.S390X.r_fp
103 = VG_(threads)[tid].arch.vex.guest_r11;
104 regs->misc.S390X.r_lr
105 = VG_(threads)[tid].arch.vex.guest_r14;
107 # error "Unknown arch"
/* Write the tool's shadow values for a syscall's result into both
   shadow guest-state banks (vex_shadow1/vex_shadow2) of thread 'tid'.
   Which guest register carries the syscall result is platform
   specific (EAX/RAX/GPR3/R0/r2).  Only AIX additionally records the
   error value, in GPR4; on the other platforms s1err/s2err are
   accepted but unused.  On Darwin nothing is written yet (see the
   fixme below). */
112 void VG_(set_syscall_return_shadows) ( ThreadId tid,
113 /* shadow vals for the result */
114 UWord s1res, UWord s2res,
115 /* shadow vals for the error val */
116 UWord s1err, UWord s2err )
118 # if defined(VGP_x86_linux) || defined (VGP_x86_l4re)
119 VG_(threads)[tid].arch.vex_shadow1.guest_EAX = s1res;
120 VG_(threads)[tid].arch.vex_shadow2.guest_EAX = s2res;
121 # elif defined(VGP_amd64_linux)
122 VG_(threads)[tid].arch.vex_shadow1.guest_RAX = s1res;
123 VG_(threads)[tid].arch.vex_shadow2.guest_RAX = s2res;
124 # elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
125 VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
126 VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
127 # elif defined(VGP_arm_linux)
128 VG_(threads)[tid].arch.vex_shadow1.guest_R0 = s1res;
129 VG_(threads)[tid].arch.vex_shadow2.guest_R0 = s2res;
130 # elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
/* AIX reports syscall errors in GPR4, so shadow that too. */
131 VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
132 VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
133 VG_(threads)[tid].arch.vex_shadow1.guest_GPR4 = s1err;
134 VG_(threads)[tid].arch.vex_shadow2.guest_GPR4 = s2err;
135 # elif defined(VGO_darwin)
136 // GrP fixme darwin syscalls may return more values (2 registers plus error)
137 # elif defined(VGP_s390x_linux)
138 VG_(threads)[tid].arch.vex_shadow1.guest_r2 = s1res;
139 VG_(threads)[tid].arch.vex_shadow2.guest_r2 = s2res;
141 # error "Unknown plat"
/* Copy 'size' bytes of guest state out of thread 'tid' into 'dst'.
   'shadowNo' selects the bank: 0 = the real guest state (arch.vex),
   1/2 = the two shadow banks.  'offset' is a byte offset into
   VexGuestArchState; offset+size must stay within the struct. */
146 VG_(get_shadow_regs_area) ( ThreadId tid,
148 /*SRC*/Int shadowNo, PtrdiffT offset, SizeT size )
152 vg_assert(shadowNo == 0 || shadowNo == 1 || shadowNo == 2);
153 vg_assert(VG_(is_valid_tid)(tid));
155 vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
156 vg_assert(offset + size <= sizeof(VexGuestArchState));
158 tst = & VG_(threads)[tid];
161 case 0: src = (void*)(((Addr)&(tst->arch.vex)) + offset); break;
162 case 1: src = (void*)(((Addr)&(tst->arch.vex_shadow1)) + offset); break;
163 case 2: src = (void*)(((Addr)&(tst->arch.vex_shadow2)) + offset); break;
/* NOTE(review): tl_assert here but vg_assert above -- looks
   inconsistent for core code; confirm which is intended. */
165 tl_assert(src != NULL);
166 VG_(memcpy)( dst, src, size);
/* Mirror image of VG_(get_shadow_regs_area): copy 'size' bytes from
   'src' into bank 'shadowNo' (0 = real state, 1/2 = shadow banks) of
   thread 'tid', starting at byte 'offset' within VexGuestArchState. */
170 VG_(set_shadow_regs_area) ( ThreadId tid,
171 /*DST*/Int shadowNo, PtrdiffT offset, SizeT size,
172 /*SRC*/const UChar* src )
176 vg_assert(shadowNo == 0 || shadowNo == 1 || shadowNo == 2);
177 vg_assert(VG_(is_valid_tid)(tid));
179 vg_assert(0 <= offset && offset < sizeof(VexGuestArchState));
180 vg_assert(offset + size <= sizeof(VexGuestArchState));
182 tst = & VG_(threads)[tid];
185 case 0: dst = (void*)(((Addr)&(tst->arch.vex)) + offset); break;
186 case 1: dst = (void*)(((Addr)&(tst->arch.vex_shadow1)) + offset); break;
187 case 2: dst = (void*)(((Addr)&(tst->arch.vex_shadow2)) + offset); break;
/* NOTE(review): tl_assert vs vg_assert inconsistency, as in the
   getter above -- confirm which is intended. */
189 tl_assert(dst != NULL);
190 VG_(memcpy)( dst, src, size);
/* Apply 'f' to each integer general-purpose register value of the
   given guest state.  Used to scan register contents (e.g. for
   root-finding over registers); see VG_(apply_to_GP_regs).
   NOTE(review): the leading '#if defined(VGA_x86)' guard and the
   first few registers of the arm/s390x lists are not visible in this
   elided listing. */
194 static void apply_to_GPs_of_tid(VexGuestArchState* vex, void (*f)(Addr))
197 (*f)(vex->guest_EAX);
198 (*f)(vex->guest_ECX);
199 (*f)(vex->guest_EDX);
200 (*f)(vex->guest_EBX);
201 (*f)(vex->guest_ESI);
202 (*f)(vex->guest_EDI);
203 (*f)(vex->guest_ESP);
204 (*f)(vex->guest_EBP);
205 #elif defined(VGA_amd64)
206 (*f)(vex->guest_RAX);
207 (*f)(vex->guest_RCX);
208 (*f)(vex->guest_RDX);
209 (*f)(vex->guest_RBX);
210 (*f)(vex->guest_RSI);
211 (*f)(vex->guest_RDI);
212 (*f)(vex->guest_RSP);
213 (*f)(vex->guest_RBP);
216 (*f)(vex->guest_R10);
217 (*f)(vex->guest_R11);
218 (*f)(vex->guest_R12);
219 (*f)(vex->guest_R13);
220 (*f)(vex->guest_R14);
221 (*f)(vex->guest_R15);
222 #elif defined(VGA_ppc32) || defined(VGA_ppc64)
/* ppc: all 32 GPRs, plus CTR (and, in elided lines, presumably LR --
   confirm against the full source). */
223 (*f)(vex->guest_GPR0);
224 (*f)(vex->guest_GPR1);
225 (*f)(vex->guest_GPR2);
226 (*f)(vex->guest_GPR3);
227 (*f)(vex->guest_GPR4);
228 (*f)(vex->guest_GPR5);
229 (*f)(vex->guest_GPR6);
230 (*f)(vex->guest_GPR7);
231 (*f)(vex->guest_GPR8);
232 (*f)(vex->guest_GPR9);
233 (*f)(vex->guest_GPR10);
234 (*f)(vex->guest_GPR11);
235 (*f)(vex->guest_GPR12);
236 (*f)(vex->guest_GPR13);
237 (*f)(vex->guest_GPR14);
238 (*f)(vex->guest_GPR15);
239 (*f)(vex->guest_GPR16);
240 (*f)(vex->guest_GPR17);
241 (*f)(vex->guest_GPR18);
242 (*f)(vex->guest_GPR19);
243 (*f)(vex->guest_GPR20);
244 (*f)(vex->guest_GPR21);
245 (*f)(vex->guest_GPR22);
246 (*f)(vex->guest_GPR23);
247 (*f)(vex->guest_GPR24);
248 (*f)(vex->guest_GPR25);
249 (*f)(vex->guest_GPR26);
250 (*f)(vex->guest_GPR27);
251 (*f)(vex->guest_GPR28);
252 (*f)(vex->guest_GPR29);
253 (*f)(vex->guest_GPR30);
254 (*f)(vex->guest_GPR31);
255 (*f)(vex->guest_CTR);
257 #elif defined(VGA_arm)
267 (*f)(vex->guest_R10);
268 (*f)(vex->guest_R11);
269 (*f)(vex->guest_R12);
270 (*f)(vex->guest_R13);
271 (*f)(vex->guest_R14);
272 #elif defined(VGA_s390x)
283 (*f)(vex->guest_r10);
284 (*f)(vex->guest_r11);
285 (*f)(vex->guest_r12);
286 (*f)(vex->guest_r13);
287 (*f)(vex->guest_r14);
288 (*f)(vex->guest_r15);
/* For every live thread, apply 'f' to that thread's integer GP
   registers.  Thread slot 0 is never used, hence the loop from 1.
   NOTE(review): 'f' is typed UWord here but apply_to_GPs_of_tid takes
   void(*)(Addr) -- presumably Addr and UWord are the same underlying
   type on all targets; confirm. */
299 for (tid = 1; tid < VG_N_THREADS; tid++) {
300 if (VG_(is_valid_tid)(tid)) {
301 ThreadState* tst = VG_(get_ThreadState)(tid);
302 apply_to_GPs_of_tid(&(tst->arch.vex), f);
/* Reset the thread-stack iterator: set *tid to (ThreadId)-1 so that
   the first call to VG_(thread_stack_next) starts scanning at slot 0. */
307 void VG_(thread_stack_reset_iter)(/*OUT*/ThreadId* tid)
309 *tid = (ThreadId)(-1);
/* Advance *tid past its current value to the next non-empty thread
   slot (skipping VG_INVALID_THREADID) and report that thread's stack
   bounds: current SP as the minimum, client_stack_highest_word as the
   maximum.  The Bool result (returned in lines elided from this
   listing) presumably indicates whether another thread was found --
   confirm against the full source. */
312 Bool VG_(thread_stack_next)(/*MOD*/ThreadId* tid,
313 /*OUT*/Addr* stack_min,
314 /*OUT*/Addr* stack_max)
317 for (i = (*tid)+1; i < VG_N_THREADS; i++) {
318 if (i == VG_INVALID_THREADID)
320 if (VG_(threads)[i].status != VgTs_Empty) {
322 *stack_min = VG_(get_SP)(i);
323 *stack_max = VG_(threads)[i].client_stack_highest_word;
/* Return the highest addressable word of 'tid's client stack.
   Note '0 <= tid' is vacuous if ThreadId is unsigned; kept as-is to
   match the sibling accessors below. */
330 Addr VG_(thread_get_stack_max)(ThreadId tid)
332 vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
333 vg_assert(VG_(threads)[tid].status != VgTs_Empty);
334 return VG_(threads)[tid].client_stack_highest_word;
/* Return the size in bytes of 'tid's client stack. */
337 SizeT VG_(thread_get_stack_size)(ThreadId tid)
339 vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
340 vg_assert(VG_(threads)[tid].status != VgTs_Empty);
341 return VG_(threads)[tid].client_stack_szB;
/* Return the base address of 'tid's alternate signal stack (the
   sigaltstack ss_sp field). */
344 Addr VG_(thread_get_altstack_min)(ThreadId tid)
346 vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
347 vg_assert(VG_(threads)[tid].status != VgTs_Empty);
348 return (Addr)VG_(threads)[tid].altstack.ss_sp;
/* Return the size in bytes of 'tid's alternate signal stack. */
351 SizeT VG_(thread_get_altstack_size)(ThreadId tid)
353 vg_assert(0 <= tid && tid < VG_N_THREADS && tid != VG_INVALID_THREADID);
354 vg_assert(VG_(threads)[tid].status != VgTs_Empty);
355 return VG_(threads)[tid].altstack.ss_size;
358 //-------------------------------------------------------------
359 /* Details about the capabilities of the underlying (host) CPU. These
360 details are acquired by (1) enquiring with the CPU at startup, or
361 (2) from the AT_SYSINFO entries the kernel gave us (ppc32 cache
362 line size). It's a bit nasty in the sense that there's no obvious
363 way to stop uses of some of this info before it's ready to go.
365 Current dependencies are:
367 x86: initially: call VG_(machine_get_hwcaps)
369 then safe to use VG_(machine_get_VexArchInfo)
370 and VG_(machine_x86_have_mxcsr)
372 amd64: initially: call VG_(machine_get_hwcaps)
374 then safe to use VG_(machine_get_VexArchInfo)
376 ppc32: initially: call VG_(machine_get_hwcaps)
377 call VG_(machine_ppc32_set_clszB)
379 then safe to use VG_(machine_get_VexArchInfo)
380 and VG_(machine_ppc32_has_FP)
381 and VG_(machine_ppc32_has_VMX)
383 ppc64: initially: call VG_(machine_get_hwcaps)
384 call VG_(machine_ppc64_set_clszB)
386 then safe to use VG_(machine_get_VexArchInfo)
387 and VG_(machine_ppc64_has_VMX)
390 s390x: initially: call VG_(machine_get_hwcaps)
392 then safe to use VG_(machine_get_VexArchInfo)
394 VG_(machine_get_hwcaps) may use signals (although it attempts to
395 leave signal state unchanged) and therefore should only be
396 called before m_main sets up the client's signal state.
399 /* --------- State --------- */
/* Set True once VG_(machine_get_hwcaps) has completed; asserted on
   entry there and required by the *_set_clszB routines. */
400 static Bool hwcaps_done = False;
402 /* --- all archs --- */
/* Host architecture details filled in by VG_(machine_get_hwcaps). */
404 static VexArchInfo vai;
407 UInt VG_(machine_x86_have_mxcsr) = 0;
409 #if defined(VGA_ppc32)
410 UInt VG_(machine_ppc32_has_FP) = 0;
411 UInt VG_(machine_ppc32_has_VMX) = 0;
413 #if defined(VGA_ppc64)
414 ULong VG_(machine_ppc64_has_VMX) = 0;
/* ARM architecture level as probed below; 4 (ARMv4) is the default
   until VG_(machine_get_hwcaps) runs. */
417 Int VG_(machine_arm_archlevel) = 4;
420 /* fixs390: anything for s390x here ? */
422 /* For hwcaps detection on ppc32/64, s390x, and arm we'll need to do SIGILL
423 testing, so we need a VG_MINIMAL_JMP_BUF. */
424 #if defined(VGA_ppc32) || defined(VGA_ppc64) \
425 || defined(VGA_arm) || defined(VGA_s390x)
426 #include "pub_tool_libcsetjmp.h"
427 static VG_MINIMAL_JMP_BUF(env_unsup_insn);
/* SIGILL/SIGFPE handler used during instruction probing: jump back to
   the probe site.  The signal number 'x' is unused. */
428 static void handler_unsup_insn ( Int x ) {
429 VG_MINIMAL_LONGJMP(env_unsup_insn);
434 /* Helper function for VG_(machine_get_hwcaps), assumes the SIGILL/etc
435 * handlers are installed. Determines the the sizes affected by dcbz
436 * and dcbzl instructions and updates the given VexArchInfo structure
439 * Not very defensive: assumes that as long as the dcbz/dcbzl
440 * instructions don't raise a SIGILL, that they will zero an aligned,
441 * contiguous block of memory of a sensible size. */
442 #if defined(VGA_ppc32) || defined(VGA_ppc64)
/* Probe the effect size of the ppc dcbz and dcbzl cache-block-zero
   instructions and record them in arch_info (ppc_dcbz_szB /
   ppc_dcbzl_szB).  Works by executing each instruction on an aligned
   0xff-filled buffer and (in loop bodies elided from this listing)
   counting how many bytes were zeroed.  dcbzl may SIGILL, in which
   case its size is recorded as 0 via the setjmp below. */
443 static void find_ppc_dcbz_sz(VexArchInfo *arch_info)
447 # define MAX_DCBZL_SZB (128) /* largest known effect of dcbzl */
448 char test_block[4*MAX_DCBZL_SZB];
449 char *aligned = test_block;
452 /* round up to next max block size, assumes MAX_DCBZL_SZB is pof2 */
453 aligned = (char *)(((HWord)aligned + MAX_DCBZL_SZB) & ~(MAX_DCBZL_SZB - 1));
454 vg_assert((aligned + MAX_DCBZL_SZB) <= &test_block[sizeof(test_block)]);
456 /* dcbz often clears 32B, although sometimes whatever the native cache
458 VG_(memset)(test_block, 0xff, sizeof(test_block));
459 __asm__ __volatile__("dcbz 0,%0"
461 : "r" (aligned) /*in*/
462 : "memory" /*clobber*/);
463 for (dcbz_szB = 0, i = 0; i < sizeof(test_block); ++i) {
467 vg_assert(dcbz_szB == 32 || dcbz_szB == 64 || dcbz_szB == 128);
469 /* dcbzl clears 128B on G5/PPC970, and usually 32B on other platforms */
470 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
471 dcbzl_szB = 0; /* indicates unsupported */
474 VG_(memset)(test_block, 0xff, sizeof(test_block));
475 /* some older assemblers won't understand the dcbzl instruction
476 * variant, so we directly emit the instruction ourselves */
477 __asm__ __volatile__("mr 9, %0 ; .long 0x7C204FEC" /*dcbzl 0,9*/
479 : "r" (aligned) /*in*/
480 : "memory", "r9" /*clobber*/);
481 for (dcbzl_szB = 0, i = 0; i < sizeof(test_block); ++i) {
485 vg_assert(dcbzl_szB == 32 || dcbzl_szB == 64 || dcbzl_szB == 128);
488 arch_info->ppc_dcbz_szB = dcbz_szB;
489 arch_info->ppc_dcbzl_szB = dcbzl_szB;
491 VG_(debugLog)(1, "machine", "dcbz_szB=%d dcbzl_szB=%d\n",
492 dcbz_szB, dcbzl_szB);
493 # undef MAX_DCBZL_SZB
499 /* Read /proc/cpuinfo. Look for lines like these
501 processor 0: version = FF, identification = 0117C9, machine = 2064
503 and return the machine model or VEX_S390X_MODEL_INVALID on error. */
/* s390x only: parse /proc/cpuinfo to determine the machine model.
   Looks for "processor ... machine = NNNN" lines, maps the 4-digit
   machine number through model_map, and returns the matching
   VEX_S390X_MODEL_* value, or VEX_S390X_MODEL_INVALID if the file
   cannot be read or no entry matches.
   NOTE(review): several lines (fh = sr_Res(fd), the read loop bounds,
   the final return) are elided from this listing. */
505 static UInt VG_(get_machine_model)(void)
507 static struct model_map {
511 { "2064", VEX_S390X_MODEL_Z900 },
512 { "2066", VEX_S390X_MODEL_Z800 },
513 { "2084", VEX_S390X_MODEL_Z990 },
514 { "2086", VEX_S390X_MODEL_Z890 },
515 { "2094", VEX_S390X_MODEL_Z9_EC },
516 { "2096", VEX_S390X_MODEL_Z9_BC },
517 { "2097", VEX_S390X_MODEL_Z10_EC },
518 { "2098", VEX_S390X_MODEL_Z10_BC },
519 { "2817", VEX_S390X_MODEL_Z196 },
524 SizeT num_bytes, file_buf_size;
525 HChar *p, *m, *model_name, *file_buf;
527 /* Slurp contents of /proc/cpuinfo into FILE_BUF */
528 fd = VG_(open)( "/proc/cpuinfo", 0, VKI_S_IRUSR );
529 if ( sr_isError(fd) ) return VEX_S390X_MODEL_INVALID;
533 /* Determine the size of /proc/cpuinfo.
534 Work around broken-ness in /proc file system implementation.
535 fstat returns a zero size for /proc/cpuinfo although it is
536 claimed to be a regular file. */
538 file_buf_size = 1000;
539 file_buf = VG_(malloc)("cpuinfo", file_buf_size + 1);
541 n = VG_(read)(fh, file_buf, file_buf_size);
545 if (n < file_buf_size) break; /* reached EOF */
548 if (n < 0) num_bytes = 0; /* read error; ignore contents */
/* If the probe read showed the file is larger than the initial
   1000-byte buffer, re-read the whole thing into a buffer of the
   discovered size. */
550 if (num_bytes > file_buf_size) {
551 VG_(free)( file_buf );
552 VG_(lseek)( fh, 0, VKI_SEEK_SET );
553 file_buf = VG_(malloc)( "cpuinfo", num_bytes + 1 );
554 n = VG_(read)( fh, file_buf, num_bytes );
555 if (n < 0) num_bytes = 0;
558 file_buf[num_bytes] = '\0';
/* Scan for "processor" lines and their "machine =" field. */
562 model = VEX_S390X_MODEL_INVALID;
563 for (p = file_buf; *p; ++p) {
564 /* Beginning of line */
565 if (VG_(strncmp)( p, "processor", sizeof "processor" - 1 ) != 0) continue;
567 m = VG_(strstr)( p, "machine" );
568 if (m == NULL) continue;
570 p = m + sizeof "machine" - 1;
571 while ( VG_(isspace)( *p ) || *p == '=') {
572 if (*p == '\n') goto next_line;
577 for (n = 0; n < sizeof model_map / sizeof model_map[0]; ++n) {
578 struct model_map *mm = model_map + n;
579 SizeT len = VG_(strlen)( mm->name );
580 if ( VG_(strncmp)( mm->name, model_name, len ) == 0 &&
581 VG_(isspace)( model_name[len] )) {
/* Keep the numerically smallest matching model id across all
   processor lines -- presumably the oldest machine wins so hwcaps
   are conservative; confirm against VEX_S390X_MODEL_* ordering. */
582 if (mm->id < model) model = mm->id;
583 p = model_name + len;
587 /* Skip until end-of-line */
593 VG_(free)( file_buf );
594 VG_(debugLog)(1, "machine", "model = %s\n", model_map[model].name);
601 /* Determine what insn set and insn set variant the host has, and
602 record it. To be called once at system startup. Returns False if
603 this a CPU incapable of running Valgrind. */
/* Determine the host's instruction-set capabilities and record them
   in the static 'vai' (VexArchInfo) plus the per-arch exported
   globals above.  Must be called exactly once, at startup, before the
   client's signal state is set up (the ppc/arm/s390x paths install
   temporary SIGILL/SIGFPE handlers for instruction probing).  Returns
   False if the CPU cannot run Valgrind.
   NOTE(review): many guard lines (#if defined(VGA_x86), closing
   braces, 'return True/False' statements) are elided from this
   listing; the first big branch below is the x86 case. */
605 Bool VG_(machine_get_hwcaps)( void )
607 vg_assert(hwcaps_done == False);
610 // Whack default settings into vai, so that we only need to fill in
611 // any interesting bits.
612 LibVEX_default_VexArchInfo(&vai);
/* ---------------- x86: probe via CPUID ---------------- */
615 { Bool have_sse1, have_sse2, have_cx8, have_lzcnt;
616 UInt eax, ebx, ecx, edx, max_extended;
620 if (!VG_(has_cpuid)())
621 /* we can't do cpuid at all. Give up. */
624 VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
626 /* we can't ask for cpuid(x) for x > 0. Give up. */
629 /* Get processor ID string, and max basic/extended index
631 VG_(memcpy)(&vstr[0], &ebx, 4);
632 VG_(memcpy)(&vstr[4], &edx, 4);
633 VG_(memcpy)(&vstr[8], &ecx, 4);
636 VG_(cpuid)(0x80000000, &eax, &ebx, &ecx, &edx);
639 /* get capabilities bits into edx */
640 VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);
642 have_sse1 = (edx & (1<<25)) != 0; /* True => have sse insns */
643 have_sse2 = (edx & (1<<26)) != 0; /* True => have sse2 insns */
645 /* cmpxchg8b is a minimum requirement now; if we don't have it we
646 must simply give up. But all CPUs since Pentium-I have it, so
647 that doesn't seem like much of a restriction. */
648 have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */
652 /* Figure out if this is an AMD that can do LZCNT. */
654 if (0 == VG_(strcmp)(vstr, "AuthenticAMD")
655 && max_extended >= 0x80000001) {
656 VG_(cpuid)(0x80000001, &eax, &ebx, &ecx, &edx);
657 have_lzcnt = (ecx & (1<<5)) != 0; /* True => have LZCNT */
/* Grade the hwcaps: SSE2 implies SSE1+SSE2 (+LZCNT if detected);
   SSE1 alone is next; otherwise baseline x87 only, no MXCSR. */
660 if (have_sse2 && have_sse1) {
662 vai.hwcaps = VEX_HWCAPS_X86_SSE1;
663 vai.hwcaps |= VEX_HWCAPS_X86_SSE2;
665 vai.hwcaps |= VEX_HWCAPS_X86_LZCNT;
666 VG_(machine_x86_have_mxcsr) = 1;
672 vai.hwcaps = VEX_HWCAPS_X86_SSE1;
673 VG_(machine_x86_have_mxcsr) = 1;
678 vai.hwcaps = 0; /*baseline - no sse at all*/
679 VG_(machine_x86_have_mxcsr) = 0;
/* ---------------- amd64: probe via CPUID ---------------- */
683 #elif defined(VGA_amd64)
684 { Bool have_sse3, have_cx8, have_cx16;
686 UInt eax, ebx, ecx, edx, max_extended;
690 if (!VG_(has_cpuid)())
691 /* we can't do cpuid at all. Give up. */
694 VG_(cpuid)(0, &eax, &ebx, &ecx, &edx);
696 /* we can't ask for cpuid(x) for x > 0. Give up. */
699 /* Get processor ID string, and max basic/extended index
701 VG_(memcpy)(&vstr[0], &ebx, 4);
702 VG_(memcpy)(&vstr[4], &edx, 4);
703 VG_(memcpy)(&vstr[8], &ecx, 4);
706 VG_(cpuid)(0x80000000, &eax, &ebx, &ecx, &edx);
709 /* get capabilities bits into edx */
710 VG_(cpuid)(1, &eax, &ebx, &ecx, &edx);
712 // we assume that SSE1 and SSE2 are available by default
713 have_sse3 = (ecx & (1<<0)) != 0; /* True => have sse3 insns */
718 /* cmpxchg8b is a minimum requirement now; if we don't have it we
719 must simply give up. But all CPUs since Pentium-I have it, so
720 that doesn't seem like much of a restriction. */
721 have_cx8 = (edx & (1<<8)) != 0; /* True => have cmpxchg8b */
725 /* on amd64 we tolerate older cpus, which don't have cmpxchg16b */
726 have_cx16 = (ecx & (1<<13)) != 0; /* True => have cmpxchg16b */
728 /* Figure out if this is an AMD that can do LZCNT. */
730 if (0 == VG_(strcmp)(vstr, "AuthenticAMD")
731 && max_extended >= 0x80000001) {
732 VG_(cpuid)(0x80000001, &eax, &ebx, &ecx, &edx);
733 have_lzcnt = (ecx & (1<<5)) != 0; /* True => have LZCNT */
737 vai.hwcaps = (have_sse3 ? VEX_HWCAPS_AMD64_SSE3 : 0)
738 | (have_cx16 ? VEX_HWCAPS_AMD64_CX16 : 0)
739 | (have_lzcnt ? VEX_HWCAPS_AMD64_LZCNT : 0);
/* ---------------- ppc32: probe by executing instructions under
   temporary SIGILL/SIGFPE handlers ---------------- */
743 #elif defined(VGA_ppc32)
745 /* Find out which subset of the ppc32 instruction set is supported by
746 verifying whether various ppc32 instructions generate a SIGILL
747 or a SIGFPE. An alternative approach is to check the AT_HWCAP and
748 AT_PLATFORM entries in the ELF auxiliary table -- see also
749 the_iifii.client_auxv in m_main.c.
751 vki_sigset_t saved_set, tmp_set;
752 vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
753 vki_sigaction_toK_t tmp_sigill_act, tmp_sigfpe_act;
755 volatile Bool have_F, have_V, have_FX, have_GX, have_VX;
758 /* This is a kludge. Really we ought to back-convert saved_act
759 into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
760 since that's a no-op on all ppc32 platforms so far supported,
761 it's not worth the typing effort. At least include most basic
763 vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));
765 VG_(sigemptyset)(&tmp_set);
766 VG_(sigaddset)(&tmp_set, VKI_SIGILL);
767 VG_(sigaddset)(&tmp_set, VKI_SIGFPE);
769 r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
772 r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
774 tmp_sigill_act = saved_sigill_act;
776 r = VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
778 tmp_sigfpe_act = saved_sigfpe_act;
780 /* NODEFER: signal handler does not return (from the kernel's point of
781 view), hence if it is to successfully catch a signal more than once,
782 we need the NODEFER flag. */
783 tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
784 tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
785 tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
786 tmp_sigill_act.ksa_handler = handler_unsup_insn;
787 r = VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
790 tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
791 tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
792 tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
793 tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
794 r = VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
797 /* standard FP insns */
799 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
802 __asm__ __volatile__(".long 0xFC000090"); /*fmr 0,0 */
807 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
810 /* Unfortunately some older assemblers don't speak Altivec (or
811 choose not to), so to be safe we directly emit the 32-bit
812 word corresponding to "vor 0,0,0". This fixes a build
813 problem that happens on Debian 3.1 (ppc32), and probably
814 various other places. */
815 __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
818 /* General-Purpose optional (fsqrt, fsqrts) */
820 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
823 __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0 */
826 /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
828 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
831 __asm__ __volatile__(".long 0xFC000034"); /* frsqrte 0,0 */
834 /* VSX support implies Power ISA 2.06 */
836 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
839 __asm__ __volatile__(".long 0xf0000564"); /* xsabsdp XT,XB */
843 /* determine dcbz/dcbzl sizes while we still have the signal
844 * handlers registered */
845 find_ppc_dcbz_sz(&vai);
/* Restore the original signal state before anyone else runs. */
847 r = VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
849 r = VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
851 r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
853 VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d VX %d\n",
854 (Int)have_F, (Int)have_V, (Int)have_FX,
855 (Int)have_GX, (Int)have_VX);
856 /* Make FP a prerequisite for VMX (bogusly so), and for FX and GX. */
857 if (have_V && !have_F)
859 if (have_FX && !have_F)
861 if (have_GX && !have_F)
864 VG_(machine_ppc32_has_FP) = have_F ? 1 : 0;
865 VG_(machine_ppc32_has_VMX) = have_V ? 1 : 0;
870 if (have_F) vai.hwcaps |= VEX_HWCAPS_PPC32_F;
871 if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC32_V;
872 if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC32_FX;
873 if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC32_GX;
874 if (have_VX) vai.hwcaps |= VEX_HWCAPS_PPC32_VX;
876 /* But we're not done yet: VG_(machine_ppc32_set_clszB) must be
877 called before we're ready to go. */
/* ---------------- ppc64: same probing scheme as ppc32 ----------- */
881 #elif defined(VGA_ppc64)
883 /* Same instruction set detection algorithm as for ppc32. */
884 vki_sigset_t saved_set, tmp_set;
885 vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
886 vki_sigaction_toK_t tmp_sigill_act, tmp_sigfpe_act;
888 volatile Bool have_F, have_V, have_FX, have_GX, have_VX;
891 /* This is a kludge. Really we ought to back-convert saved_act
892 into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
893 since that's a no-op on all ppc64 platforms so far supported,
894 it's not worth the typing effort. At least include most basic
896 vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));
898 VG_(sigemptyset)(&tmp_set);
899 VG_(sigaddset)(&tmp_set, VKI_SIGILL);
900 VG_(sigaddset)(&tmp_set, VKI_SIGFPE);
902 r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
905 r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
907 tmp_sigill_act = saved_sigill_act;
909 VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
910 tmp_sigfpe_act = saved_sigfpe_act;
912 /* NODEFER: signal handler does not return (from the kernel's point of
913 view), hence if it is to successfully catch a signal more than once,
914 we need the NODEFER flag. */
915 tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
916 tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
917 tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
918 tmp_sigill_act.ksa_handler = handler_unsup_insn;
919 VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
921 tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
922 tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
923 tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
924 tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
925 VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
927 /* standard FP insns */
929 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
932 __asm__ __volatile__("fmr 0,0");
937 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
940 __asm__ __volatile__(".long 0x10000484"); /*vor 0,0,0*/
943 /* General-Purpose optional (fsqrt, fsqrts) */
945 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
948 __asm__ __volatile__(".long 0xFC00002C"); /*fsqrt 0,0*/
951 /* Graphics optional (stfiwx, fres, frsqrte, fsel) */
953 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
956 __asm__ __volatile__(".long 0xFC000034"); /*frsqrte 0,0*/
959 /* VSX support implies Power ISA 2.06 */
961 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
964 __asm__ __volatile__(".long 0xf0000564"); /* xsabsdp XT,XB */
967 /* determine dcbz/dcbzl sizes while we still have the signal
968 * handlers registered */
969 find_ppc_dcbz_sz(&vai);
971 VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
972 VG_(sigaction)(VKI_SIGFPE, &saved_sigfpe_act, NULL);
973 VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
974 VG_(debugLog)(1, "machine", "F %d V %d FX %d GX %d VX %d\n",
975 (Int)have_F, (Int)have_V, (Int)have_FX,
976 (Int)have_GX, (Int)have_VX);
977 /* on ppc64, if we don't even have FP, just give up. */
981 VG_(machine_ppc64_has_VMX) = have_V ? 1 : 0;
986 if (have_V) vai.hwcaps |= VEX_HWCAPS_PPC64_V;
987 if (have_FX) vai.hwcaps |= VEX_HWCAPS_PPC64_FX;
988 if (have_GX) vai.hwcaps |= VEX_HWCAPS_PPC64_GX;
989 if (have_VX) vai.hwcaps |= VEX_HWCAPS_PPC64_VX;
991 /* But we're not done yet: VG_(machine_ppc64_set_clszB) must be
992 called before we're ready to go. */
/* ---------------- s390x: SIGILL probing plus /proc/cpuinfo model
   lookup ---------------- */
996 #elif defined(VGA_s390x)
998 /* Instruction set detection code borrowed from ppc above. */
999 vki_sigset_t saved_set, tmp_set;
1000 vki_sigaction_fromK_t saved_sigill_act;
1001 vki_sigaction_toK_t tmp_sigill_act;
1003 volatile Bool have_LDISP, have_EIMM, have_GIE, have_DFP, have_FGX;
1006 /* Unblock SIGILL and stash away the old action for that signal */
1007 VG_(sigemptyset)(&tmp_set);
1008 VG_(sigaddset)(&tmp_set, VKI_SIGILL);
1010 r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
1013 r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
1015 tmp_sigill_act = saved_sigill_act;
1017 /* NODEFER: signal handler does not return (from the kernel's point of
1018 view), hence if it is to successfully catch a signal more than once,
1019 we need the NODEFER flag. */
1020 tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
1021 tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
1022 tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
1023 tmp_sigill_act.ksa_handler = handler_unsup_insn;
1024 VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
1026 /* Determine hwcaps. Note, we cannot use the stfle insn because it
1027 is not supported on z900. */
1030 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1033 /* BASR loads the address of the next insn into r1. Needed to avoid
1034 a segfault in XY. */
1035 __asm__ __volatile__("basr %%r1,%%r0\n\t"
1036 ".long 0xe3001000\n\t" /* XY 0,0(%r1) */
1037 ".short 0x0057" : : : "r0", "r1", "cc", "memory");
1041 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1044 __asm__ __volatile__(".long 0xc0090000\n\t" /* iilf r0,0 */
1045 ".short 0x0000" : : : "r0", "memory");
1049 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1052 __asm__ __volatile__(".long 0xc2010000\n\t" /* msfi r0,0 */
1053 ".short 0x0000" : : : "r0", "memory");
1057 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1060 __asm__ __volatile__(".long 0xb3d20000"
1061 : : : "r0", "cc", "memory"); /* adtr r0,r0,r0 */
1065 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1068 __asm__ __volatile__(".long 0xb3cd0000" : : : "r0"); /* lgdr r0,f0 */
1071 /* Restore signals */
1072 r = VG_(sigaction)(VKI_SIGILL, &saved_sigill_act, NULL);
1074 r = VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
1078 model = VG_(get_machine_model)();
1080 VG_(debugLog)(1, "machine", "machine %d LDISP %d EIMM %d GIE %d DFP %d "
1081 "FGX %d\n", model, have_LDISP, have_EIMM, have_GIE,
1082 have_DFP, have_FGX);
1084 if (model == VEX_S390X_MODEL_INVALID) return False;
1088 /* Use long displacement only on machines >= z990. For all other machines
1089 it is millicoded and therefore slow. */
1090 if (model >= VEX_S390X_MODEL_Z990)
1091 vai.hwcaps |= VEX_HWCAPS_S390X_LDISP;
1093 if (have_EIMM) vai.hwcaps |= VEX_HWCAPS_S390X_EIMM;
1094 if (have_GIE) vai.hwcaps |= VEX_HWCAPS_S390X_GIE;
1095 if (have_DFP) vai.hwcaps |= VEX_HWCAPS_S390X_DFP;
1096 if (have_FGX) vai.hwcaps |= VEX_HWCAPS_S390X_FGX;
1098 VG_(debugLog)(1, "machine", "hwcaps = 0x%x\n", vai.hwcaps);
/* ---------------- arm: SIGILL probing of VFP/NEON and arch level
   ---------------- */
1103 #elif defined(VGA_arm)
1105 /* Same instruction set detection algorithm as for ppc32. */
1106 vki_sigset_t saved_set, tmp_set;
1107 vki_sigaction_fromK_t saved_sigill_act, saved_sigfpe_act;
1108 vki_sigaction_toK_t tmp_sigill_act, tmp_sigfpe_act;
1110 volatile Bool have_VFP, have_VFP2, have_VFP3, have_NEON;
1111 volatile Int archlevel;
1114 /* This is a kludge. Really we ought to back-convert saved_act
1115 into a toK_t using VG_(convert_sigaction_fromK_to_toK), but
1116 since that's a no-op on all ppc64 platforms so far supported,
1117 it's not worth the typing effort. At least include most basic
1119 vg_assert(sizeof(vki_sigaction_fromK_t) == sizeof(vki_sigaction_toK_t));
1121 VG_(sigemptyset)(&tmp_set);
1122 VG_(sigaddset)(&tmp_set, VKI_SIGILL);
1123 VG_(sigaddset)(&tmp_set, VKI_SIGFPE);
1125 r = VG_(sigprocmask)(VKI_SIG_UNBLOCK, &tmp_set, &saved_set);
1128 r = VG_(sigaction)(VKI_SIGILL, NULL, &saved_sigill_act);
1130 tmp_sigill_act = saved_sigill_act;
1132 VG_(sigaction)(VKI_SIGFPE, NULL, &saved_sigfpe_act);
1133 tmp_sigfpe_act = saved_sigfpe_act;
1135 /* NODEFER: signal handler does not return (from the kernel's point of
1136 view), hence if it is to successfully catch a signal more than once,
1137 we need the NODEFER flag. */
1138 tmp_sigill_act.sa_flags &= ~VKI_SA_RESETHAND;
1139 tmp_sigill_act.sa_flags &= ~VKI_SA_SIGINFO;
1140 tmp_sigill_act.sa_flags |= VKI_SA_NODEFER;
1141 tmp_sigill_act.ksa_handler = handler_unsup_insn;
1142 VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
1144 tmp_sigfpe_act.sa_flags &= ~VKI_SA_RESETHAND;
1145 tmp_sigfpe_act.sa_flags &= ~VKI_SA_SIGINFO;
1146 tmp_sigfpe_act.sa_flags |= VKI_SA_NODEFER;
1147 tmp_sigfpe_act.ksa_handler = handler_unsup_insn;
1148 VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
1152 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1155 __asm__ __volatile__(".word 0xEEB02B42"); /* VMOV.F64 d2, d2 */
1157 /* There are several generation of VFP extension but they differs very
1158 little so for now we will not distinguish them. */
1159 have_VFP2 = have_VFP;
1160 have_VFP3 = have_VFP;
1164 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1167 __asm__ __volatile__(".word 0xF2244154"); /* VMOV q2, q2 */
1170 /* ARM architecture level */
1171 archlevel = 5; /* v5 will be base level */
/* Probe upwards: a v7-only insn (PLI), then a v6-only insn (PKHBT);
   the archlevel assignments on success are elided here. */
1172 if (archlevel < 7) {
1174 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1177 __asm__ __volatile__(".word 0xF45FF000"); /* PLI [PC,#-0] */
1180 if (archlevel < 6) {
1182 if (VG_MINIMAL_SETJMP(env_unsup_insn)) {
1185 __asm__ __volatile__(".word 0xE6822012"); /* PKHBT r2, r2, r2 */
/* Restore signal state; arm does the fromK->toK conversion properly,
   unlike the ppc paths above. */
1189 VG_(convert_sigaction_fromK_to_toK)(&saved_sigill_act, &tmp_sigill_act);
1190 VG_(convert_sigaction_fromK_to_toK)(&saved_sigfpe_act, &tmp_sigfpe_act);
1191 VG_(sigaction)(VKI_SIGILL, &tmp_sigill_act, NULL);
1192 VG_(sigaction)(VKI_SIGFPE, &tmp_sigfpe_act, NULL);
1193 VG_(sigprocmask)(VKI_SIG_SETMASK, &saved_set, NULL);
1195 VG_(debugLog)(1, "machine", "ARMv%d VFP %d VFP2 %d VFP3 %d NEON %d\n",
1196 archlevel, (Int)have_VFP, (Int)have_VFP2, (Int)have_VFP3,
1199 VG_(machine_arm_archlevel) = archlevel;
1203 vai.hwcaps = VEX_ARM_ARCHLEVEL(archlevel);
1204 if (have_VFP3) vai.hwcaps |= VEX_HWCAPS_ARM_VFP3;
1205 if (have_VFP2) vai.hwcaps |= VEX_HWCAPS_ARM_VFP2;
1206 if (have_VFP) vai.hwcaps |= VEX_HWCAPS_ARM_VFP;
1207 if (have_NEON) vai.hwcaps |= VEX_HWCAPS_ARM_NEON;
1213 # error "Unknown arch"
1217 /* Notify host cpu cache line size. */
1218 #if defined(VGA_ppc32)
1219 void VG_(machine_ppc32_set_clszB)( Int szB )
1221 vg_assert(hwcaps_done);
1223 /* Either the value must not have been set yet (zero) or we can
1224 tolerate it being set to the same value multiple times, as the
1225 stack scanning logic in m_main is a bit stupid. */
1226 vg_assert(vai.ppc_cache_line_szB == 0
1227 || vai.ppc_cache_line_szB == szB);
1229 vg_assert(szB == 32 || szB == 64 || szB == 128);
1230 vai.ppc_cache_line_szB = szB;
1235 /* Notify host cpu cache line size. */
1236 #if defined(VGA_ppc64)
1237 void VG_(machine_ppc64_set_clszB)( Int szB )
1239 vg_assert(hwcaps_done);
1241 /* Either the value must not have been set yet (zero) or we can
1242 tolerate it being set to the same value multiple times, as the
1243 stack scanning logic in m_main is a bit stupid. */
1244 vg_assert(vai.ppc_cache_line_szB == 0
1245 || vai.ppc_cache_line_szB == szB);
1247 vg_assert(szB == 32 || szB == 64 || szB == 128);
1248 vai.ppc_cache_line_szB = szB;
1253 /* Notify host's ability to handle NEON instructions. */
1254 #if defined(VGA_arm)
1255 void VG_(machine_arm_set_has_NEON)( Bool has_neon )
1257 vg_assert(hwcaps_done);
1258 /* There's nothing else we can sanity check. */
1261 vai.hwcaps |= VEX_HWCAPS_ARM_NEON;
1263 vai.hwcaps &= ~VEX_HWCAPS_ARM_NEON;
/* Fetch host cpu info, once established.  Copies the cached host
   description into the caller's out-parameters; a NULL pointer means
   "not interested in that piece". */
void VG_(machine_get_VexArchInfo)( /*OUT*/VexArch* pVa,
                                   /*OUT*/VexArchInfo* pVai )
   /* VG_(machine_get_hwcaps) must have completed first. */
   vg_assert(hwcaps_done);
   /* NOTE(review): the matching "if (pVa) *pVa = ..." store is not
      visible in this excerpt — confirm pVa is also filled in. */
   if (pVai) *pVai = vai;
// Given a pointer to a function as obtained by "& functionname" in C,
// produce a pointer to the actual entry point for the function.
// On most platforms a function pointer already is the entry point;
// ppc64/AIX-style ABIs instead point at a function descriptor.
void* VG_(fnptr_to_fnentry)( void* f )
/* Platforms where &fn is the entry point itself (the plain
   "return f;" case is elided from this excerpt). */
#if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
|| defined(VGP_arm_linux) \
|| defined(VGP_ppc32_linux) || defined(VGO_darwin) \
|| defined(VGP_s390x_linux) || defined(VGO_l4re)
#elif defined(VGP_ppc64_linux) || defined(VGP_ppc32_aix5) \
|| defined(VGP_ppc64_aix5)
/* All other ppc variants use the AIX scheme, in which f is a
   pointer to a 3-word function descriptor, of which the first word
   is the entry address. */
UWord* descr = (UWord*)f;
return (void*)(descr[0]);
# error "Unknown platform"
1300 /*--------------------------------------------------------------------*/
1302 /*--------------------------------------------------------------------*/