
/*---------------------------------------------------------------*/
/*--- begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2010 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emwarn.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_s390_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"

#include "host_generic_simd128.h"


/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static Bool   are_valid_hwcaps ( VexArch arch, UInt hwcaps );
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps );


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vcon->iropt_verbosity            = 0;
   vcon->iropt_level                = 2;
   vcon->iropt_precise_memory_exns  = False;
   vcon->iropt_unroll_thresh        = 120;
   vcon->guest_max_insns            = 60;
   vcon->guest_chase_thresh         = 10;
   vcon->guest_chase_cond           = False;
}


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Are we supporting valgrind checking? */
   Bool valgrind_support,
   /* Control ... */
   /*READONLY*/VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_valgrind_support   = valgrind_support;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}
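
/* Illustrative client-side initialisation sketch (not part of the
   library, and not authoritative).  The callback names below are
   hypothetical placeholders; a real client supplies its own.  The call
   order follows the requirements asserted above: fetch the defaults,
   optionally adjust them, then call LibVEX_Init exactly once.

      static void my_failure_exit ( void ) { for (;;) ; }  // must not return
      static void my_log_bytes ( HChar* bytes, Int nbytes )
      {
         // write nbytes starting at bytes to some log sink
      }

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      vcon.iropt_level = 2;          // may be tweaked within asserted bounds
      LibVEX_Init(my_failure_exit, my_log_bytes,
                  0,                 // debuglevel
                  False,             // valgrind_support
                  &vcon);
*/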


/* --------- Make a translation. --------- */

/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   HReg* available_real_regs;
   Int   n_available_real_regs;
   Bool         (*isMove)       ( HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*,
                                                  VexAbiInfo* );
   Int          (*emit)         ( UChar*, Int, HInstr*, Bool, void* );
   IRExpr*      (*specHelper)   ( HChar*, IRExpr**, IRStmt**, Int );
   Bool         (*preciseMemExnsFn) ( Int, Int );

   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   Bool            host_is_bigendian = False;
   IRSB*           irsb;
   HInstrArray*    vcode;
   HInstrArray*    rcode;
   Int             i, j, k, out_used, guest_sizeB;
   Int             offB_TISTART, offB_TILEN;
   UChar           insn_bytes[48];
   IRType          guest_word_type;
   IRType          host_word_type;
   Bool            mode64;

   guest_layout           = NULL;
   available_real_regs    = NULL;
   n_available_real_regs  = 0;
   isMove                 = NULL;
   getRegUsage            = NULL;
   mapRegs                = NULL;
   genSpill               = NULL;
   genReload              = NULL;
   directReload           = NULL;
   ppInstr                = NULL;
   ppReg                  = NULL;
   iselSB                 = NULL;
   emit                   = NULL;
   specHelper             = NULL;
   preciseMemExnsFn       = NULL;
   disInstrFn             = NULL;
   guest_word_type        = Ity_INVALID;
   host_word_type         = Ity_INVALID;
   offB_TISTART           = 0;
   offB_TILEN             = 0;
   mode64                 = False;

   vex_traceflags = vta->traceflags;

   vassert(vex_initdone);
   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         getAllocableRegs_X86 ( &n_available_real_regs,
                                &available_real_regs );
         isMove       = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
         getRegUsage  = (void(*)(HRegUsage*,HInstr*, Bool))
                        getRegUsage_X86Instr;
         mapRegs      = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
         genSpill     = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genSpill_X86;
         genReload    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genReload_X86;
         directReload = (HInstr*(*)(HInstr*,HReg,Short)) directReload_X86;
         ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
         ppReg        = (void(*)(HReg)) ppHRegX86;
         iselSB       = iselSB_X86;
         emit         = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_X86Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch != NULL); /* jump-to-dispatcher scheme */
         break;

      case VexArchAMD64:
         mode64      = True;
         getAllocableRegs_AMD64 ( &n_available_real_regs,
                                  &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool))
                       getRegUsage_AMD64Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genSpill_AMD64;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genReload_AMD64;
         ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
         ppReg       = (void(*)(HReg)) ppHRegAMD64;
         iselSB      = iselSB_AMD64;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_AMD64Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch != NULL); /* jump-to-dispatcher scheme */
         break;

      case VexArchPPC32:
         mode64      = False;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      case VexArchPPC64:
         mode64      = True;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      case VexArchS390X:
         mode64      = True;
         getAllocableRegs_S390 ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_S390Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_S390Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_S390Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_S390;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_S390;
         ppInstr     = (void(*)(HInstr*, Bool)) ppS390Instr;
         ppReg       = (void(*)(HReg)) ppHRegS390;
         iselSB      = iselSB_S390;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_S390Instr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      case VexArchARM:
         mode64      = False;
         getAllocableRegs_ARM ( &n_available_real_regs,
                                &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_ARMInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_ARMInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_ARMInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_ARM;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_ARM;
         ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
         ppReg       = (void(*)(HReg)) ppHRegARM;
         iselSB      = iselSB_ARM;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_ARMInstr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }


   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_X86;
         specHelper       = guest_x86_spechelper;
         guest_sizeB      = sizeof(VexGuestX86State);
         guest_word_type  = Ity_I32;
         guest_layout     = &x86guest_layout;
         offB_TISTART     = offsetof(VexGuestX86State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestX86State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestX86State) % 16);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TILEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn = guest_amd64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_AMD64;
         specHelper       = guest_amd64_spechelper;
         guest_sizeB      = sizeof(VexGuestAMD64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &amd64guest_layout;
         offB_TISTART     = offsetof(VexGuestAMD64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestAMD64State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestAMD64State) % 16);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TISTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TILEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc32_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC32State);
         guest_word_type  = Ity_I32;
         guest_layout     = &ppc32Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC32State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC32State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC32State) % 16);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TISTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TILEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc64_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &ppc64Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC64State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC64State) % 16);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TISTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TILEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn = guest_s390x_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_S390;
         specHelper       = guest_s390x_spechelper;
         guest_sizeB      = sizeof(VexGuestS390XState);
         guest_word_type  = Ity_I64;
         guest_layout     = &s390xGuest_layout;
         offB_TISTART     = offsetof(VexGuestS390XState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestS390XState,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestS390XState) % 16);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_TISTART    ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_TILEN      ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn = guest_arm_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_ARM;
         specHelper       = guest_arm_spechelper;
         guest_sizeB      = sizeof(VexGuestARMState);
         guest_word_type  = Ity_I32;
         guest_layout     = &armGuest_layout;
         offB_TISTART     = offsetof(VexGuestARMState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestARMState,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestARMState) % 16);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TILEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* doesn't necessarily have to be true, but if it isn't, it means
         we are simulating one flavour of an architecture on a different
         flavour of the same architecture, which is pretty strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   irsb = bb_to_IR ( vta->guest_extents,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     host_is_bigendian,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->do_self_check,
                     vta->preamble_function,
                     offB_TISTART,
                     offB_TILEN );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      vex_traceflags = 0;
      return VexTransAccessFail;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         UChar* p = (UChar*)vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %llx %u ", vta->guest_bytes_addr,
                                           guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn,
                              vta->guest_bytes_addr,
                              vta->arch_guest );
   sanityCheckIRSB( irsb, "after initial iropt",
                    True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   if (vta->instrument1 || vta->instrument2)
      sanityCheckIRSB( irsb, "after instrumentation",
                       True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   ado_treebuild_BB( irsb );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) { *(vta->host_bytes_used) = 0; return VexTransOK; }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   vcode = iselSB ( irsb, vta->arch_host, &vta->archinfo_host,
                                          &vta->abiinfo_both );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, available_real_regs,
                                  n_available_real_regs,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) { *(vta->host_bytes_used) = 0; return VexTransOK; }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      if (vex_traceflags & VEX_TRACE_ASM) {
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      j = (*emit)( insn_bytes, sizeof insn_bytes, rcode->arr[i], mode64,
                   vta->dispatch );
      if (vex_traceflags & VEX_TRACE_ASM) {
         for (k = 0; k < j; k++)
            if (insn_bytes[k] < 16)
               vex_printf("0%x ",  (UInt)insn_bytes[k]);
            else
               vex_printf("%x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (out_used + j > vta->host_bytes_size) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         return VexTransOutputFull;
      }
      for (k = 0; k < j; k++) {
         vta->host_bytes[out_used] = insn_bytes[k];
         out_used++;
      }
      vassert(out_used <= vta->host_bytes_size);
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   vex_traceflags = 0;
   return VexTransOK;
}
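
/* Illustrative translation-driving sketch (not part of the library,
   and not authoritative).  Only fields that LibVEX_Translate reads
   above are shown; the guest/host choice, the input code pointer and
   the output buffer size are placeholder assumptions.

      UChar host_code[4096];
      Int   host_code_used = 0;
      VexTranslateArgs   vta;
      VexTranslateResult res;

      LibVEX_default_VexArchInfo(&vta.archinfo_guest);
      LibVEX_default_VexArchInfo(&vta.archinfo_host);
      LibVEX_default_VexAbiInfo(&vta.abiinfo_both);
      vta.arch_guest       = VexArchX86;     // example guest
      vta.arch_host        = VexArchX86;     // example host
      vta.guest_bytes      = guest_insns;    // code to translate (assumed)
      vta.guest_bytes_addr = guest_addr;     // its guest address (assumed)
      vta.host_bytes       = host_code;
      vta.host_bytes_size  = sizeof host_code;
      vta.host_bytes_used  = &host_code_used;
      ...                                    // callbacks, extents, tracing, etc.
      res = LibVEX_Translate(&vta);
      // res is VexTransOK, VexTransAccessFail or VexTransOutputFull
*/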


/* --------- Emulation warnings. --------- */

HChar* LibVEX_EmWarn_string ( VexEmWarn ew )
{
   switch (ew) {
     case EmWarn_NONE:
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     default:
        vpanic("LibVEX_EmWarn_string: unknown warning");
   }
}

/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      case VexArchS390X:    return "S390X";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   HChar* str = show_hwcaps(arch,hwcaps);
   return str ? str : "INVALID";
}

/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vai->hwcaps             = 0;
   vai->ppc_cache_line_szB = 0;
   vai->ppc_dcbz_szB       = 0;
   vai->ppc_dcbzl_szB      = 0;
}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_zero  = False;
   vbi->guest_amd64_assume_gs_is_0x60  = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->guest_ppc_sc_continues_at_LR   = False;
   vbi->host_ppc_calls_use_fndescrs    = False;
   vbi->host_ppc32_regalign_int64_args = False;
}


/* Return a string showing the hwcaps in a nice way.  The string will
   be NULL for invalid combinations of flags, so these functions also
   serve as a way to validate hwcaps values. */

static HChar* show_hwcaps_x86 ( UInt hwcaps )
{
   /* Monotonic, SSE3 > SSE2 > SSE1 > baseline. */
   switch (hwcaps) {
      case 0:
         return "x86-sse0";
      case VEX_HWCAPS_X86_SSE1:
         return "x86-sse1";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2:
         return "x86-sse1-sse2";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-lzcnt";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3:
         return "x86-sse1-sse2-sse3";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3 | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-sse3-lzcnt";
      default:
         return NULL;
   }
}
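
/* Worked example: with the monotonic x86 encoding above,
   LibVEX_ppVexHwCaps(VexArchX86, VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2)
   yields "x86-sse1-sse2", whereas a non-monotonic combination such as
   VEX_HWCAPS_X86_SSE2 on its own falls through to the default case, so
   show_hwcaps returns NULL and LibVEX_ppVexHwCaps reports "INVALID". */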

static HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   /* SSE3 and CX16 are orthogonal and > baseline, although we really
      don't expect to come across anything which can do SSE3 but can't
      do CX16.  Still, we can handle that case.  LZCNT is similarly
      orthogonal. */
   switch (hwcaps) {
      case 0:
         return "amd64-sse2";
      case VEX_HWCAPS_AMD64_SSE3:
         return "amd64-sse3";
      case VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse2-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse3-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-lzcnt";
      case VEX_HWCAPS_AMD64_CX16 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse2-cx16-lzcnt";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16
           | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-cx16-lzcnt";

      default:
         return NULL;
   }
}

static HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > F > baseline,
      but once you have F then you can have FX or GX too. */
   const UInt F  = VEX_HWCAPS_PPC32_F;
   const UInt V  = VEX_HWCAPS_PPC32_V;
   const UInt FX = VEX_HWCAPS_PPC32_FX;
   const UInt GX = VEX_HWCAPS_PPC32_GX;
   const UInt VX = VEX_HWCAPS_PPC32_VX;
         UInt c  = hwcaps;
   if (c == 0)           return "ppc32-int";
   if (c == F)           return "ppc32-int-flt";
   if (c == (F|FX))      return "ppc32-int-flt-FX";
   if (c == (F|GX))      return "ppc32-int-flt-GX";
   if (c == (F|FX|GX))   return "ppc32-int-flt-FX-GX";
   if (c == (F|V))       return "ppc32-int-flt-vmx";
   if (c == (F|V|FX))    return "ppc32-int-flt-vmx-FX";
   if (c == (F|V|GX))    return "ppc32-int-flt-vmx-GX";
   if (c == (F|V|FX|GX)) return "ppc32-int-flt-vmx-FX-GX";
   if (c == (F|V|FX|GX|VX)) return "ppc32-int-flt-vmx-FX-GX-VX";
   return NULL;
}

static HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > baseline(==F),
      but once you have F then you can have FX or GX too. */
   const UInt V  = VEX_HWCAPS_PPC64_V;
   const UInt FX = VEX_HWCAPS_PPC64_FX;
   const UInt GX = VEX_HWCAPS_PPC64_GX;
   const UInt VX = VEX_HWCAPS_PPC64_VX;
         UInt c  = hwcaps;
   if (c == 0)         return "ppc64-int-flt";
   if (c == FX)        return "ppc64-int-flt-FX";
   if (c == GX)        return "ppc64-int-flt-GX";
   if (c == (FX|GX))   return "ppc64-int-flt-FX-GX";
   if (c == V)         return "ppc64-int-flt-vmx";
   if (c == (V|FX))    return "ppc64-int-flt-vmx-FX";
   if (c == (V|GX))    return "ppc64-int-flt-vmx-GX";
   if (c == (V|FX|GX)) return "ppc64-int-flt-vmx-FX-GX";
   if (c == (V|FX|GX|VX)) return "ppc64-int-flt-vmx-FX-GX-VX";
   return NULL;
}

static HChar* show_hwcaps_arm ( UInt hwcaps )
{
   Bool N = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
   Bool vfp = ((hwcaps & (VEX_HWCAPS_ARM_VFP |
               VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3)) != 0);
   switch (VEX_ARM_ARCHLEVEL(hwcaps)) {
      case 5:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv5-vfp";
         else
            return "ARMv5";
         return NULL;
      case 6:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv6-vfp";
         else
            return "ARMv6";
         return NULL;
      case 7:
         if (vfp) {
            if (N)
               return "ARMv7-vfp-neon";
            else
               return "ARMv7-vfp";
         } else {
            if (N)
               return "ARMv7-neon";
            else
               return "ARMv7";
         }
      default:
         return NULL;
   }
   return NULL;
}

static HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   static const HChar facilities[][6] = {
     { "ldisp" },
     { "eimm" },
     { "gie" },
     { "dfp" },
     { "fgx" },
   };
   static HChar buf[sizeof facilities + sizeof prefix + 1];
   static HChar *p;

   if (buf[0] != '\0') return buf;  /* already constructed */

   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   if (hwcaps & VEX_HWCAPS_S390X_LDISP)
     p = p + vex_sprintf(p, "-%s", facilities[0]);
   if (hwcaps & VEX_HWCAPS_S390X_EIMM)
     p = p + vex_sprintf(p, "-%s", facilities[1]);
   if (hwcaps & VEX_HWCAPS_S390X_GIE)
     p = p + vex_sprintf(p, "-%s", facilities[2]);
   if (hwcaps & VEX_HWCAPS_S390X_DFP)
     p = p + vex_sprintf(p, "-%s", facilities[3]);
   if (hwcaps & VEX_HWCAPS_S390X_FGX)
     p = p + vex_sprintf(p, "-%s", facilities[4]);

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
     vex_sprintf(p, "-%s", "zarch");

   return buf;
}
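
/* Note/example: the string above is built once into a static buffer and
   cached, so a first call with (VEX_HWCAPS_S390X_LDISP |
   VEX_HWCAPS_S390X_EIMM) returns "s390x-ldisp-eimm", and any later call
   returns that same cached string regardless of its hwcaps argument. */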

/* ---- */
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:   return show_hwcaps_x86(hwcaps);
      case VexArchAMD64: return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32: return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64: return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:   return show_hwcaps_arm(hwcaps);
      case VexArchS390X: return show_hwcaps_s390x(hwcaps);
      default: return NULL;
   }
}

static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch,hwcaps) != NULL;
}


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/