2 /*---------------------------------------------------------------*/
3 /*--- begin guest_arm_helpers.c ---*/
4 /*---------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2004-2010 OpenWorks LLP
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
28 The GNU General Public License is contained in the file COPYING.
31 #include "libvex_basictypes.h"
32 #include "libvex_emwarn.h"
33 #include "libvex_guest_arm.h"
34 #include "libvex_ir.h"
37 #include "main_util.h"
38 #include "guest_generic_bb_to_IR.h"
39 #include "guest_arm_defs.h"
42 /* This file contains helper functions for arm guest code. Calls to
43 these functions are generated by the back end. These calls are of
44 course in the host machine code and this file will be compiled to
45 host machine code, so that all makes sense.
47 Only change the signatures of these helper functions very
48 carefully. If you change the signature here, you'll have to change
49 the parameters passed to it in the IR calls constructed by
55 /* generalised left-shifter */
56 static inline UInt lshift ( UInt x, Int n )
65 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
66 /* Calculate NZCV from the supplied thunk components, in the positions
67 they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
68 Returned bits 27:0 are zero. */
69 UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
70 UInt cc_dep2, UInt cc_dep3 )
74 /* (nzcv, unused, unused) */
76 case ARMG_CC_OP_ADD: {
77 /* (argL, argR, unused) */
80 UInt res = argL + argR;
81 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
82 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
83 // CF and VF need verification
84 UInt cf = lshift( res < argL, ARMG_CC_SHIFT_C );
85 UInt vf = lshift( (res ^ argL) & (res ^ argR),
86 ARMG_CC_SHIFT_V + 1 - 32 )
88 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
89 // argL, argR, nf, zf, cf, vf);
90 return nf | zf | cf | vf;
92 case ARMG_CC_OP_SUB: {
93 /* (argL, argR, unused) */
96 UInt res = argL - argR;
97 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
98 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
99 // XXX cf is inverted relative to normal sense
100 UInt cf = lshift( argL >= argR, ARMG_CC_SHIFT_C );
101 UInt vf = lshift( (argL ^ argR) & (argL ^ res),
102 ARMG_CC_SHIFT_V + 1 - 32 )
104 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
105 // argL, argR, nf, zf, cf, vf);
106 return nf | zf | cf | vf;
108 case ARMG_CC_OP_ADC: {
109 /* (argL, argR, oldC) */
113 UInt res = (argL + argR) + oldC;
114 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
115 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
116 UInt cf = oldC ? lshift( res <= argL, ARMG_CC_SHIFT_C )
117 : lshift( res < argL, ARMG_CC_SHIFT_C );
118 UInt vf = lshift( (res ^ argL) & (res ^ argR),
119 ARMG_CC_SHIFT_V + 1 - 32 )
121 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
122 // argL, argR, nf, zf, cf, vf);
123 return nf | zf | cf | vf;
125 case ARMG_CC_OP_SBB: {
126 /* (argL, argR, oldC) */
130 UInt res = argL - argR - (oldC ^ 1);
131 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
132 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
133 UInt cf = oldC ? lshift( argL >= argR, ARMG_CC_SHIFT_C )
134 : lshift( argL > argR, ARMG_CC_SHIFT_C );
135 UInt vf = lshift( (argL ^ argR) & (argL ^ res),
136 ARMG_CC_SHIFT_V + 1 - 32 )
138 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
139 // argL, argR, nf, zf, cf, vf);
140 return nf | zf | cf | vf;
142 case ARMG_CC_OP_LOGIC: {
143 /* (res, shco, oldV) */
147 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
148 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
149 UInt cf = lshift( shco & 1, ARMG_CC_SHIFT_C );
150 UInt vf = lshift( oldV & 1, ARMG_CC_SHIFT_V );
151 return nf | zf | cf | vf;
153 case ARMG_CC_OP_MUL: {
154 /* (res, unused, oldC:oldV) */
156 UInt oldC = (cc_dep3 >> 1) & 1;
157 UInt oldV = (cc_dep3 >> 0) & 1;
158 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
159 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
160 UInt cf = lshift( oldC & 1, ARMG_CC_SHIFT_C );
161 UInt vf = lshift( oldV & 1, ARMG_CC_SHIFT_V );
162 return nf | zf | cf | vf;
164 case ARMG_CC_OP_MULL: {
165 /* (resLo32, resHi32, oldC:oldV) */
166 UInt resLo32 = cc_dep1;
167 UInt resHi32 = cc_dep2;
168 UInt oldC = (cc_dep3 >> 1) & 1;
169 UInt oldV = (cc_dep3 >> 0) & 1;
170 UInt nf = lshift( resHi32 & (1<<31), ARMG_CC_SHIFT_N - 31 );
171 UInt zf = lshift( (resHi32|resLo32) == 0, ARMG_CC_SHIFT_Z );
172 UInt cf = lshift( oldC & 1, ARMG_CC_SHIFT_C );
173 UInt vf = lshift( oldV & 1, ARMG_CC_SHIFT_V );
174 return nf | zf | cf | vf;
177 /* shouldn't really make these calls from generated code */
178 vex_printf("armg_calculate_flags_nzcv"
179 "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
180 cc_op, cc_dep1, cc_dep2, cc_dep3 );
181 vpanic("armg_calculate_flags_nzcv");
186 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
187 /* Calculate the C flag from the thunk components, in the lowest bit
188 of the word (bit 0). */
189 UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
190 UInt cc_dep2, UInt cc_dep3 )
192 UInt r = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
193 return (r >> ARMG_CC_SHIFT_C) & 1;
197 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
198 /* Calculate the V flag from the thunk components, in the lowest bit
199 of the word (bit 0). */
200 UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
201 UInt cc_dep2, UInt cc_dep3 )
203 UInt r = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
204 return (r >> ARMG_CC_SHIFT_V) & 1;
208 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
209 /* Calculate the specified condition from the thunk components, in the
210 lowest bit of the word (bit 0). */
212 UInt armg_calculate_condition ( UInt cond_n_op /* ARMCondcode << 4 | cc_op */,
214 UInt cc_dep2, UInt cc_dep3 )
216 UInt cond = cond_n_op >> 4;
217 UInt cc_op = cond_n_op & 0xF;
220 // vex_printf("XXXXXXXX %x %x %x %x\n", cond_n_op, cc_dep1, cc_dep2, cc_dep3);
221 UInt nzcv = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
224 case ARMCondEQ: // Z=1 => z
225 case ARMCondNE: // Z=0
226 zf = nzcv >> ARMG_CC_SHIFT_Z;
227 return 1 & (inv ^ zf);
229 case ARMCondHS: // C=1 => c
230 case ARMCondLO: // C=0
231 cf = nzcv >> ARMG_CC_SHIFT_C;
232 return 1 & (inv ^ cf);
234 case ARMCondMI: // N=1 => n
235 case ARMCondPL: // N=0
236 nf = nzcv >> ARMG_CC_SHIFT_N;
237 return 1 & (inv ^ nf);
239 case ARMCondVS: // V=1 => v
240 case ARMCondVC: // V=0
241 vf = nzcv >> ARMG_CC_SHIFT_V;
242 return 1 & (inv ^ vf);
244 case ARMCondHI: // C=1 && Z=0 => c & ~z
245 case ARMCondLS: // C=0 || Z=1
246 cf = nzcv >> ARMG_CC_SHIFT_C;
247 zf = nzcv >> ARMG_CC_SHIFT_Z;
248 return 1 & (inv ^ (cf & ~zf));
250 case ARMCondGE: // N=V => ~(n^v)
251 case ARMCondLT: // N!=V
252 nf = nzcv >> ARMG_CC_SHIFT_N;
253 vf = nzcv >> ARMG_CC_SHIFT_V;
254 return 1 & (inv ^ ~(nf ^ vf));
256 case ARMCondGT: // Z=0 && N=V => ~z & ~(n^v) => ~(z | (n^v))
257 case ARMCondLE: // Z=1 || N!=V
258 nf = nzcv >> ARMG_CC_SHIFT_N;
259 vf = nzcv >> ARMG_CC_SHIFT_V;
260 zf = nzcv >> ARMG_CC_SHIFT_Z;
261 return 1 & (inv ^ ~(zf | (nf ^ vf)));
263 case ARMCondAL: // should never get here: Always => no flags to calc
264 case ARMCondNV: // should never get here: Illegal instr
266 /* shouldn't really make these calls from generated code */
267 vex_printf("armg_calculate_condition(ARM)"
268 "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
269 cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
270 vpanic("armg_calculate_condition(ARM)");
275 /*---------------------------------------------------------------*/
276 /*--- Flag-helpers translation-time function specialisers. ---*/
277 /*--- These help iropt specialise calls the above run-time ---*/
278 /*--- flags functions. ---*/
279 /*---------------------------------------------------------------*/
281 /* Used by the optimiser to try specialisations. Returns an
282 equivalent expression, or NULL if none. */
284 static Bool isU32 ( IRExpr* e, UInt n )
287 toBool( e->tag == Iex_Const
288 && e->Iex.Const.con->tag == Ico_U32
289 && e->Iex.Const.con->Ico.U32 == n );
292 IRExpr* guest_arm_spechelper ( HChar* function_name,
295 # define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
296 # define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
297 # define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
298 # define mkU8(_n) IRExpr_Const(IRConst_U8(_n))
301 for (i = 0; args[i]; i++)
304 vex_printf("spec request:\n");
305 vex_printf(" %s ", function_name);
306 for (i = 0; i < arity; i++) {
313 /* --------- specialising "x86g_calculate_condition" --------- */
315 if (vex_streq(function_name, "armg_calculate_condition")) {
316 /* specialise calls to above "armg_calculate condition" function */
317 IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_dep3;
319 cond_n_op = args[0]; /* ARMCondcode << 4 | ARMG_CC_OP_* */
324 /*---------------- SUB ----------------*/
326 if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
327 /* EQ after SUB --> test argL == argR */
328 return unop(Iop_1Uto32,
329 binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
331 if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
332 /* NE after SUB --> test argL != argR */
333 return unop(Iop_1Uto32,
334 binop(Iop_CmpNE32, cc_dep1, cc_dep2));
337 if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
338 /* LE after SUB --> test argL <=s argR */
339 return unop(Iop_1Uto32,
340 binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
343 if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
344 /* LE after SUB --> test argL <s argR */
345 return unop(Iop_1Uto32,
346 binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
349 if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
350 /* GE after SUB --> test argL >=s argR
351 --> test argR <=s argL */
352 return unop(Iop_1Uto32,
353 binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
356 if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
357 /* HS after SUB --> test argL >=u argR
358 --> test argR <=u argL */
359 return unop(Iop_1Uto32,
360 binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
363 if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
364 /* LS after SUB --> test argL <=u argR */
365 return unop(Iop_1Uto32,
366 binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
369 /*---------------- LOGIC ----------------*/
370 if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
371 /* EQ after LOGIC --> test res == 0 */
372 return unop(Iop_1Uto32,
373 binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
375 if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
376 /* NE after LOGIC --> test res != 0 */
377 return unop(Iop_1Uto32,
378 binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
392 /*----------------------------------------------*/
393 /*--- The exported fns .. ---*/
394 /*----------------------------------------------*/
396 /* VISIBLE TO LIBVEX CLIENT */
398 void LibVEX_GuestARM_put_flags ( UInt flags_native,
399 /*OUT*/VexGuestARMState* vex_state )
403 /* Mask out everything except N Z V C. */
405 &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);
407 vex_state->guest_CC_OP = ARMG_CC_OP_COPY;
408 vex_state->guest_CC_DEP1 = flags_native;
409 vex_state->guest_CC_DEP2 = 0;
410 vex_state->guest_CC_NDEP = 0;
414 /* VISIBLE TO LIBVEX CLIENT */
415 UInt LibVEX_GuestARM_get_cpsr ( /*IN*/VexGuestARMState* vex_state )
418 nzcv = armg_calculate_flags_nzcv(
419 vex_state->guest_CC_OP,
420 vex_state->guest_CC_DEP1,
421 vex_state->guest_CC_DEP2,
422 vex_state->guest_CC_NDEP
427 /* VISIBLE TO LIBVEX CLIENT */
428 void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
430 vex_state->guest_R0 = 0;
431 vex_state->guest_R1 = 0;
432 vex_state->guest_R2 = 0;
433 vex_state->guest_R3 = 0;
434 vex_state->guest_R4 = 0;
435 vex_state->guest_R5 = 0;
436 vex_state->guest_R6 = 0;
437 vex_state->guest_R7 = 0;
438 vex_state->guest_R8 = 0;
439 vex_state->guest_R9 = 0;
440 vex_state->guest_R10 = 0;
441 vex_state->guest_R11 = 0;
442 vex_state->guest_R12 = 0;
443 vex_state->guest_R13 = 0;
444 vex_state->guest_R14 = 0;
445 vex_state->guest_R15 = 0;
447 vex_state->guest_CC_OP = ARMG_CC_OP_COPY;
448 vex_state->guest_CC_DEP1 = 0;
449 vex_state->guest_CC_DEP2 = 0;
450 vex_state->guest_CC_NDEP = 0;
452 vex_state->guest_EMWARN = 0;
453 vex_state->guest_TISTART = 0;
454 vex_state->guest_TILEN = 0;
455 vex_state->guest_NRADDR = 0;
456 vex_state->guest_IP_AT_SYSCALL = 0;
458 vex_state->guest_D0 = 0;
459 vex_state->guest_D1 = 0;
460 vex_state->guest_D2 = 0;
461 vex_state->guest_D3 = 0;
462 vex_state->guest_D4 = 0;
463 vex_state->guest_D5 = 0;
464 vex_state->guest_D6 = 0;
465 vex_state->guest_D7 = 0;
466 vex_state->guest_D8 = 0;
467 vex_state->guest_D9 = 0;
468 vex_state->guest_D10 = 0;
469 vex_state->guest_D11 = 0;
470 vex_state->guest_D12 = 0;
471 vex_state->guest_D13 = 0;
472 vex_state->guest_D14 = 0;
473 vex_state->guest_D15 = 0;
475 /* ARM encoded; zero is the default as it happens (result flags
476 (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
477 all exns masked, all exn sticky bits cleared). */
478 vex_state->guest_FPSCR = 0;
480 vex_state->guest_TPIDRURO = 0;
482 /* vex_state->padding1 = 0; */
483 /* vex_state->padding2 = 0; */
487 /*-----------------------------------------------------------*/
488 /*--- Describing the arm guest state, for the benefit ---*/
489 /*--- of iropt and instrumenters. ---*/
490 /*-----------------------------------------------------------*/
492 /* Figure out if any part of the guest state contained in minoff
493 .. maxoff requires precise memory exceptions. If in doubt return
494 True (but this is generates significantly slower code).
496 We enforce precise exns for guest R13(sp), R15(pc).
498 Bool guest_arm_state_requires_precise_mem_exns ( Int minoff,
501 Int sp_min = offsetof(VexGuestARMState, guest_R13);
502 Int sp_max = sp_min + 4 - 1;
503 Int pc_min = offsetof(VexGuestARMState, guest_R15);
504 Int pc_max = pc_min + 4 - 1;
506 if (maxoff < sp_min || minoff > sp_max) {
507 /* no overlap with sp */
512 if (maxoff < pc_min || minoff > pc_max) {
513 /* no overlap with pc */
518 /* We appear to need precise updates of R11 in order to get proper
519 stacktraces from non-optimised code. */
520 Int r11_min = offsetof(VexGuestARMState, guest_R11);
521 Int r11_max = r11_min + 4 - 1;
523 if (maxoff < r11_min || minoff > r11_max) {
524 /* no overlap with pc */
/* Build an (offset, size) pair for one guest state field; used to
   populate the .alwaysDefd table describing fields Memcheck may treat
   as always defined. */
#define ALWAYSDEFD(field) \
   { offsetof(VexGuestARMState, field), \
     (sizeof ((VexGuestARMState*)0)->field) }
541 /* Total size of the guest state, in bytes. */
542 .total_sizeB = sizeof(VexGuestARMState),
544 /* Describe the stack pointer. */
545 .offset_SP = offsetof(VexGuestARMState,guest_R13),
548 /* Describe the instruction pointer. */
549 .offset_IP = offsetof(VexGuestARMState,guest_R15),
552 /* Describe any sections to be regarded by Memcheck as
556 /* flags thunk: OP is always defd, whereas DEP1 and DEP2
557 have to be tracked. See detailed comment in gdefs.h on
558 meaning of thunk fields. */
560 = { /* 0 */ ALWAYSDEFD(guest_R15),
561 /* 1 */ ALWAYSDEFD(guest_CC_OP),
562 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
563 /* 3 */ ALWAYSDEFD(guest_EMWARN),
564 /* 4 */ ALWAYSDEFD(guest_TISTART),
565 /* 5 */ ALWAYSDEFD(guest_TILEN),
566 /* 6 */ ALWAYSDEFD(guest_NRADDR),
567 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
568 /* 8 */ ALWAYSDEFD(guest_TPIDRURO)
573 /*---------------------------------------------------------------*/
574 /*--- end guest_arm_helpers.c ---*/
575 /*---------------------------------------------------------------*/