2 /*---------------------------------------------------------------*/
3 /*--- begin guest_arm_helpers.c ---*/
4 /*---------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2004-2010 OpenWorks LLP
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
28 The GNU General Public License is contained in the file COPYING.
31 #include "libvex_basictypes.h"
32 #include "libvex_emwarn.h"
33 #include "libvex_guest_arm.h"
34 #include "libvex_ir.h"
37 #include "main_util.h"
38 #include "guest_generic_bb_to_IR.h"
39 #include "guest_arm_defs.h"
42 /* This file contains helper functions for arm guest code. Calls to
43 these functions are generated by the back end. These calls are of
44 course in the host machine code and this file will be compiled to
45 host machine code, so that all makes sense.
47 Only change the signatures of these helper functions very
48 carefully. If you change the signature here, you'll have to change
49 the parameters passed to it in the IR calls constructed by
55 /* generalised left-shifter */
56 static inline UInt lshift ( UInt x, Int n )
65 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
66 /* Calculate NZCV from the supplied thunk components, in the positions
67 they appear in the CPSR, viz bits 31:28 for N Z C V respectively.
68 Returned bits 27:0 are zero. */
69 UInt armg_calculate_flags_nzcv ( UInt cc_op, UInt cc_dep1,
70 UInt cc_dep2, UInt cc_dep3 )
74 /* (nzcv, unused, unused) */
76 case ARMG_CC_OP_ADD: {
77 /* (argL, argR, unused) */
80 UInt res = argL + argR;
81 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
82 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
83 // CF and VF need verification
84 UInt cf = lshift( res < argL, ARMG_CC_SHIFT_C );
85 UInt vf = lshift( (res ^ argL) & (res ^ argR),
86 ARMG_CC_SHIFT_V + 1 - 32 )
88 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
89 // argL, argR, nf, zf, cf, vf);
90 return nf | zf | cf | vf;
92 case ARMG_CC_OP_SUB: {
93 /* (argL, argR, unused) */
96 UInt res = argL - argR;
97 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
98 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
99 // XXX cf is inverted relative to normal sense
100 UInt cf = lshift( argL >= argR, ARMG_CC_SHIFT_C );
101 UInt vf = lshift( (argL ^ argR) & (argL ^ res),
102 ARMG_CC_SHIFT_V + 1 - 32 )
104 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
105 // argL, argR, nf, zf, cf, vf);
106 return nf | zf | cf | vf;
108 case ARMG_CC_OP_ADC: {
109 /* (argL, argR, oldC) */
113 UInt res = (argL + argR) + oldC;
114 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
115 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
116 UInt cf = oldC ? lshift( res <= argL, ARMG_CC_SHIFT_C )
117 : lshift( res < argL, ARMG_CC_SHIFT_C );
118 UInt vf = lshift( (res ^ argL) & (res ^ argR),
119 ARMG_CC_SHIFT_V + 1 - 32 )
121 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
122 // argL, argR, nf, zf, cf, vf);
123 return nf | zf | cf | vf;
125 case ARMG_CC_OP_SBB: {
126 /* (argL, argR, oldC) */
130 UInt res = argL - argR - (oldC ^ 1);
131 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
132 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
133 UInt cf = oldC ? lshift( argL >= argR, ARMG_CC_SHIFT_C )
134 : lshift( argL > argR, ARMG_CC_SHIFT_C );
135 UInt vf = lshift( (argL ^ argR) & (argL ^ res),
136 ARMG_CC_SHIFT_V + 1 - 32 )
138 //vex_printf("%08x %08x -> n %x z %x c %x v %x\n",
139 // argL, argR, nf, zf, cf, vf);
140 return nf | zf | cf | vf;
142 case ARMG_CC_OP_LOGIC: {
143 /* (res, shco, oldV) */
147 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
148 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
149 UInt cf = lshift( shco & 1, ARMG_CC_SHIFT_C );
150 UInt vf = lshift( oldV & 1, ARMG_CC_SHIFT_V );
151 return nf | zf | cf | vf;
153 case ARMG_CC_OP_MUL: {
154 /* (res, unused, oldC:oldV) */
156 UInt oldC = (cc_dep3 >> 1) & 1;
157 UInt oldV = (cc_dep3 >> 0) & 1;
158 UInt nf = lshift( res & (1<<31), ARMG_CC_SHIFT_N - 31 );
159 UInt zf = lshift( res == 0, ARMG_CC_SHIFT_Z );
160 UInt cf = lshift( oldC & 1, ARMG_CC_SHIFT_C );
161 UInt vf = lshift( oldV & 1, ARMG_CC_SHIFT_V );
162 return nf | zf | cf | vf;
164 case ARMG_CC_OP_MULL: {
165 /* (resLo32, resHi32, oldC:oldV) */
166 UInt resLo32 = cc_dep1;
167 UInt resHi32 = cc_dep2;
168 UInt oldC = (cc_dep3 >> 1) & 1;
169 UInt oldV = (cc_dep3 >> 0) & 1;
170 UInt nf = lshift( resHi32 & (1<<31), ARMG_CC_SHIFT_N - 31 );
171 UInt zf = lshift( (resHi32|resLo32) == 0, ARMG_CC_SHIFT_Z );
172 UInt cf = lshift( oldC & 1, ARMG_CC_SHIFT_C );
173 UInt vf = lshift( oldV & 1, ARMG_CC_SHIFT_V );
174 return nf | zf | cf | vf;
177 /* shouldn't really make these calls from generated code */
178 vex_printf("armg_calculate_flags_nzcv"
179 "( op=%u, dep1=0x%x, dep2=0x%x, dep3=0x%x )\n",
180 cc_op, cc_dep1, cc_dep2, cc_dep3 );
181 vpanic("armg_calculate_flags_nzcv");
186 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
187 /* Calculate the C flag from the thunk components, in the lowest bit
188 of the word (bit 0). */
189 UInt armg_calculate_flag_c ( UInt cc_op, UInt cc_dep1,
190 UInt cc_dep2, UInt cc_dep3 )
192 UInt r = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
193 return (r >> ARMG_CC_SHIFT_C) & 1;
197 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
198 /* Calculate the V flag from the thunk components, in the lowest bit
199 of the word (bit 0). */
200 UInt armg_calculate_flag_v ( UInt cc_op, UInt cc_dep1,
201 UInt cc_dep2, UInt cc_dep3 )
203 UInt r = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
204 return (r >> ARMG_CC_SHIFT_V) & 1;
207 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
208 /* Calculate the QC flag from the arguments, in the lowest bit
209 of the word (bit 0). Urr, having this out of line is bizarre.
211 UInt armg_calculate_flag_qc ( UInt resL1, UInt resL2,
212 UInt resR1, UInt resR2 )
214 if (resL1 != resR1 || resL2 != resR2)
220 /* CALLED FROM GENERATED CODE: CLEAN HELPER */
221 /* Calculate the specified condition from the thunk components, in the
222 lowest bit of the word (bit 0). */
224 UInt armg_calculate_condition ( UInt cond_n_op /* ARMCondcode << 4 | cc_op */,
226 UInt cc_dep2, UInt cc_dep3 )
228 UInt cond = cond_n_op >> 4;
229 UInt cc_op = cond_n_op & 0xF;
230 UInt nf, zf, vf, cf, nzcv, inv;
231 // vex_printf("XXXXXXXX %x %x %x %x\n",
232 // cond_n_op, cc_dep1, cc_dep2, cc_dep3);
234 // skip flags computation in this case
235 if (cond == ARMCondAL) return 1;
238 nzcv = armg_calculate_flags_nzcv(cc_op, cc_dep1, cc_dep2, cc_dep3);
241 case ARMCondEQ: // Z=1 => z
242 case ARMCondNE: // Z=0
243 zf = nzcv >> ARMG_CC_SHIFT_Z;
244 return 1 & (inv ^ zf);
246 case ARMCondHS: // C=1 => c
247 case ARMCondLO: // C=0
248 cf = nzcv >> ARMG_CC_SHIFT_C;
249 return 1 & (inv ^ cf);
251 case ARMCondMI: // N=1 => n
252 case ARMCondPL: // N=0
253 nf = nzcv >> ARMG_CC_SHIFT_N;
254 return 1 & (inv ^ nf);
256 case ARMCondVS: // V=1 => v
257 case ARMCondVC: // V=0
258 vf = nzcv >> ARMG_CC_SHIFT_V;
259 return 1 & (inv ^ vf);
261 case ARMCondHI: // C=1 && Z=0 => c & ~z
262 case ARMCondLS: // C=0 || Z=1
263 cf = nzcv >> ARMG_CC_SHIFT_C;
264 zf = nzcv >> ARMG_CC_SHIFT_Z;
265 return 1 & (inv ^ (cf & ~zf));
267 case ARMCondGE: // N=V => ~(n^v)
268 case ARMCondLT: // N!=V
269 nf = nzcv >> ARMG_CC_SHIFT_N;
270 vf = nzcv >> ARMG_CC_SHIFT_V;
271 return 1 & (inv ^ ~(nf ^ vf));
273 case ARMCondGT: // Z=0 && N=V => ~z & ~(n^v) => ~(z | (n^v))
274 case ARMCondLE: // Z=1 || N!=V
275 nf = nzcv >> ARMG_CC_SHIFT_N;
276 vf = nzcv >> ARMG_CC_SHIFT_V;
277 zf = nzcv >> ARMG_CC_SHIFT_Z;
278 return 1 & (inv ^ ~(zf | (nf ^ vf)));
280 case ARMCondAL: // handled above
281 case ARMCondNV: // should never get here: Illegal instr
283 /* shouldn't really make these calls from generated code */
284 vex_printf("armg_calculate_condition(ARM)"
285 "( %u, %u, 0x%x, 0x%x, 0x%x )\n",
286 cond, cc_op, cc_dep1, cc_dep2, cc_dep3 );
287 vpanic("armg_calculate_condition(ARM)");
292 /*---------------------------------------------------------------*/
293 /*--- Flag-helpers translation-time function specialisers. ---*/
294 /*--- These help iropt specialise calls the above run-time ---*/
295 /*--- flags functions. ---*/
296 /*---------------------------------------------------------------*/
298 /* Used by the optimiser to try specialisations. Returns an
299 equivalent expression, or NULL if none. */
301 static Bool isU32 ( IRExpr* e, UInt n )
304 toBool( e->tag == Iex_Const
305 && e->Iex.Const.con->tag == Ico_U32
306 && e->Iex.Const.con->Ico.U32 == n );
309 IRExpr* guest_arm_spechelper ( HChar* function_name,
311 IRStmt** precedingStmts,
312 Int n_precedingStmts )
314 # define unop(_op,_a1) IRExpr_Unop((_op),(_a1))
315 # define binop(_op,_a1,_a2) IRExpr_Binop((_op),(_a1),(_a2))
316 # define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
317 # define mkU8(_n) IRExpr_Const(IRConst_U8(_n))
320 for (i = 0; args[i]; i++)
323 vex_printf("spec request:\n");
324 vex_printf(" %s ", function_name);
325 for (i = 0; i < arity; i++) {
332 /* --------- specialising "armg_calculate_condition" --------- */
334 if (vex_streq(function_name, "armg_calculate_condition")) {
335 /* specialise calls to above "armg_calculate condition" function */
336 IRExpr *cond_n_op, *cc_dep1, *cc_dep2, *cc_dep3;
338 cond_n_op = args[0]; /* ARMCondcode << 4 | ARMG_CC_OP_* */
343 /*---------------- SUB ----------------*/
345 if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_SUB)) {
346 /* EQ after SUB --> test argL == argR */
347 return unop(Iop_1Uto32,
348 binop(Iop_CmpEQ32, cc_dep1, cc_dep2));
350 if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_SUB)) {
351 /* NE after SUB --> test argL != argR */
352 return unop(Iop_1Uto32,
353 binop(Iop_CmpNE32, cc_dep1, cc_dep2));
356 if (isU32(cond_n_op, (ARMCondLE << 4) | ARMG_CC_OP_SUB)) {
357 /* LE after SUB --> test argL <=s argR */
358 return unop(Iop_1Uto32,
359 binop(Iop_CmpLE32S, cc_dep1, cc_dep2));
362 if (isU32(cond_n_op, (ARMCondLT << 4) | ARMG_CC_OP_SUB)) {
363 /* LT after SUB --> test argL <s argR */
364 return unop(Iop_1Uto32,
365 binop(Iop_CmpLT32S, cc_dep1, cc_dep2));
368 if (isU32(cond_n_op, (ARMCondGE << 4) | ARMG_CC_OP_SUB)) {
369 /* GE after SUB --> test argL >=s argR
370 --> test argR <=s argL */
371 return unop(Iop_1Uto32,
372 binop(Iop_CmpLE32S, cc_dep2, cc_dep1));
375 if (isU32(cond_n_op, (ARMCondHS << 4) | ARMG_CC_OP_SUB)) {
376 /* HS after SUB --> test argL >=u argR
377 --> test argR <=u argL */
378 return unop(Iop_1Uto32,
379 binop(Iop_CmpLE32U, cc_dep2, cc_dep1));
382 if (isU32(cond_n_op, (ARMCondLS << 4) | ARMG_CC_OP_SUB)) {
383 /* LS after SUB --> test argL <=u argR */
384 return unop(Iop_1Uto32,
385 binop(Iop_CmpLE32U, cc_dep1, cc_dep2));
388 /*---------------- LOGIC ----------------*/
389 if (isU32(cond_n_op, (ARMCondEQ << 4) | ARMG_CC_OP_LOGIC)) {
390 /* EQ after LOGIC --> test res == 0 */
391 return unop(Iop_1Uto32,
392 binop(Iop_CmpEQ32, cc_dep1, mkU32(0)));
394 if (isU32(cond_n_op, (ARMCondNE << 4) | ARMG_CC_OP_LOGIC)) {
395 /* NE after LOGIC --> test res != 0 */
396 return unop(Iop_1Uto32,
397 binop(Iop_CmpNE32, cc_dep1, mkU32(0)));
400 /*----------------- AL -----------------*/
401 /* A critically important case for Thumb code.
403 What we're trying to spot is the case where cond_n_op is an
404 expression of the form Or32(..., 0xE0) since that means the
405 caller is asking for CondAL and we can simply return 1
406 without caring what the ... part is. This is a potentially
407 dodgy kludge in that it assumes that the ... part has zeroes
408 in bits 7:4, so that the result of the Or32 is guaranteed to
409 be 0xE in bits 7:4. Given that the places where this first
410 arg are constructed (in guest_arm_toIR.c) are very
411 constrained, we can get away with this. To make this
412 guaranteed safe would require to have a new primop, Slice44
415 Slice44(arg1, arg2) = 0--(24)--0 arg1[7:4] arg2[3:0]
417 and we would then look for Slice44(0xE0, ...)
418 which would give the required safety property.
420 It would be infeasibly expensive to scan backwards through
421 the entire block looking for an assignment to the temp, so
422 just look at the previous 16 statements. That should find it
423 if it is an interesting case, as a result of how the
424 boilerplate guff at the start of each Thumb insn translation
427 if (cond_n_op->tag == Iex_RdTmp) {
429 IRTemp look_for = cond_n_op->Iex.RdTmp.tmp;
430 Int limit = n_precedingStmts - 16;
431 if (limit < 0) limit = 0;
432 if (0) vex_printf("scanning %d .. %d\n", n_precedingStmts-1, limit);
433 for (j = n_precedingStmts - 1; j >= limit; j--) {
434 IRStmt* st = precedingStmts[j];
435 if (st->tag == Ist_WrTmp
436 && st->Ist.WrTmp.tmp == look_for
437 && st->Ist.WrTmp.data->tag == Iex_Binop
438 && st->Ist.WrTmp.data->Iex.Binop.op == Iop_Or32
439 && isU32(st->Ist.WrTmp.data->Iex.Binop.arg2, (ARMCondAL << 4)))
442 /* Didn't find any useful binding to the first arg
443 in the previous 16 stmts. */
456 /*----------------------------------------------*/
457 /*--- The exported fns .. ---*/
458 /*----------------------------------------------*/
460 /* VISIBLE TO LIBVEX CLIENT */
462 void LibVEX_GuestARM_put_flags ( UInt flags_native,
463 /*OUT*/VexGuestARMState* vex_state )
467 /* Mask out everything except N Z V C. */
469 &= (ARMG_CC_MASK_N | ARMG_CC_MASK_Z | ARMG_CC_MASK_V | ARMG_CC_MASK_C);
471 vex_state->guest_CC_OP = ARMG_CC_OP_COPY;
472 vex_state->guest_CC_DEP1 = flags_native;
473 vex_state->guest_CC_DEP2 = 0;
474 vex_state->guest_CC_NDEP = 0;
478 /* VISIBLE TO LIBVEX CLIENT */
479 UInt LibVEX_GuestARM_get_cpsr ( /*IN*/VexGuestARMState* vex_state )
482 nzcv = armg_calculate_flags_nzcv(
483 vex_state->guest_CC_OP,
484 vex_state->guest_CC_DEP1,
485 vex_state->guest_CC_DEP2,
486 vex_state->guest_CC_NDEP
491 /* VISIBLE TO LIBVEX CLIENT */
492 void LibVEX_GuestARM_initialise ( /*OUT*/VexGuestARMState* vex_state )
494 vex_state->guest_R0 = 0;
495 vex_state->guest_R1 = 0;
496 vex_state->guest_R2 = 0;
497 vex_state->guest_R3 = 0;
498 vex_state->guest_R4 = 0;
499 vex_state->guest_R5 = 0;
500 vex_state->guest_R6 = 0;
501 vex_state->guest_R7 = 0;
502 vex_state->guest_R8 = 0;
503 vex_state->guest_R9 = 0;
504 vex_state->guest_R10 = 0;
505 vex_state->guest_R11 = 0;
506 vex_state->guest_R12 = 0;
507 vex_state->guest_R13 = 0;
508 vex_state->guest_R14 = 0;
509 vex_state->guest_R15T = 0; /* NB: implies ARM mode */
511 vex_state->guest_CC_OP = ARMG_CC_OP_COPY;
512 vex_state->guest_CC_DEP1 = 0;
513 vex_state->guest_CC_DEP2 = 0;
514 vex_state->guest_CC_NDEP = 0;
515 vex_state->guest_QFLAG32 = 0;
517 vex_state->guest_EMWARN = 0;
518 vex_state->guest_TISTART = 0;
519 vex_state->guest_TILEN = 0;
520 vex_state->guest_NRADDR = 0;
521 vex_state->guest_IP_AT_SYSCALL = 0;
523 vex_state->guest_D0 = 0;
524 vex_state->guest_D1 = 0;
525 vex_state->guest_D2 = 0;
526 vex_state->guest_D3 = 0;
527 vex_state->guest_D4 = 0;
528 vex_state->guest_D5 = 0;
529 vex_state->guest_D6 = 0;
530 vex_state->guest_D7 = 0;
531 vex_state->guest_D8 = 0;
532 vex_state->guest_D9 = 0;
533 vex_state->guest_D10 = 0;
534 vex_state->guest_D11 = 0;
535 vex_state->guest_D12 = 0;
536 vex_state->guest_D13 = 0;
537 vex_state->guest_D14 = 0;
538 vex_state->guest_D15 = 0;
539 vex_state->guest_D16 = 0;
540 vex_state->guest_D17 = 0;
541 vex_state->guest_D18 = 0;
542 vex_state->guest_D19 = 0;
543 vex_state->guest_D20 = 0;
544 vex_state->guest_D21 = 0;
545 vex_state->guest_D22 = 0;
546 vex_state->guest_D23 = 0;
547 vex_state->guest_D24 = 0;
548 vex_state->guest_D25 = 0;
549 vex_state->guest_D26 = 0;
550 vex_state->guest_D27 = 0;
551 vex_state->guest_D28 = 0;
552 vex_state->guest_D29 = 0;
553 vex_state->guest_D30 = 0;
554 vex_state->guest_D31 = 0;
556 /* ARM encoded; zero is the default as it happens (result flags
557 (NZCV) cleared, FZ disabled, round to nearest, non-vector mode,
558 all exns masked, all exn sticky bits cleared). */
559 vex_state->guest_FPSCR = 0;
561 vex_state->guest_TPIDRURO = 0;
563 /* Not in a Thumb IT block. */
564 vex_state->guest_ITSTATE = 0;
566 vex_state->padding1 = 0;
567 vex_state->padding2 = 0;
568 vex_state->padding3 = 0;
572 /*-----------------------------------------------------------*/
573 /*--- Describing the arm guest state, for the benefit ---*/
574 /*--- of iropt and instrumenters. ---*/
575 /*-----------------------------------------------------------*/
577 /* Figure out if any part of the guest state contained in minoff
578 .. maxoff requires precise memory exceptions. If in doubt return
579 True (but this is generates significantly slower code).
581 We enforce precise exns for guest R13(sp), R15T(pc).
583 Bool guest_arm_state_requires_precise_mem_exns ( Int minoff,
586 Int sp_min = offsetof(VexGuestARMState, guest_R13);
587 Int sp_max = sp_min + 4 - 1;
588 Int pc_min = offsetof(VexGuestARMState, guest_R15T);
589 Int pc_max = pc_min + 4 - 1;
591 if (maxoff < sp_min || minoff > sp_max) {
592 /* no overlap with sp */
597 if (maxoff < pc_min || minoff > pc_max) {
598 /* no overlap with pc */
603 /* We appear to need precise updates of R11 in order to get proper
604 stacktraces from non-optimised code. */
605 Int r11_min = offsetof(VexGuestARMState, guest_R11);
606 Int r11_max = r11_min + 4 - 1;
608 if (maxoff < r11_min || minoff > r11_max) {
609 /* no overlap with pc */
/* Names a guest-state field that Memcheck may regard as always
   defined; expands to the field's { offset, size } pair for the
   .alwaysDefd table below. */
#define ALWAYSDEFD(field)                            \
    { offsetof(VexGuestARMState, field),             \
      (sizeof ((VexGuestARMState*)0)->field) }
626 /* Total size of the guest state, in bytes. */
627 .total_sizeB = sizeof(VexGuestARMState),
629 /* Describe the stack pointer. */
630 .offset_SP = offsetof(VexGuestARMState,guest_R13),
633 /* Describe the instruction pointer. */
634 .offset_IP = offsetof(VexGuestARMState,guest_R15T),
637 /* Describe any sections to be regarded by Memcheck as
641 /* flags thunk: OP is always defd, whereas DEP1 and DEP2
642 have to be tracked. See detailed comment in gdefs.h on
643 meaning of thunk fields. */
645 = { /* 0 */ ALWAYSDEFD(guest_R15T),
646 /* 1 */ ALWAYSDEFD(guest_CC_OP),
647 /* 2 */ ALWAYSDEFD(guest_CC_NDEP),
648 /* 3 */ ALWAYSDEFD(guest_EMWARN),
649 /* 4 */ ALWAYSDEFD(guest_TISTART),
650 /* 5 */ ALWAYSDEFD(guest_TILEN),
651 /* 6 */ ALWAYSDEFD(guest_NRADDR),
652 /* 7 */ ALWAYSDEFD(guest_IP_AT_SYSCALL),
653 /* 8 */ ALWAYSDEFD(guest_TPIDRURO),
654 /* 9 */ ALWAYSDEFD(guest_ITSTATE)
659 /*---------------------------------------------------------------*/
660 /*--- end guest_arm_helpers.c ---*/
661 /*---------------------------------------------------------------*/