3 #include "exec/gdbstub.h"
5 #include "qemu/host-utils.h"
6 #include "sysemu/arch_init.h"
7 #include "sysemu/sysemu.h"
8 #include "qemu/bitops.h"
9 #include "qemu/crc32c.h"
10 #include <zlib.h> /* For crc32 */
12 #ifndef CONFIG_USER_ONLY
13 #include "exec/softmmu_exec.h"
15 static inline int get_phys_addr(CPUARMState *env, target_ulong address,
16 int access_type, int is_user,
17 hwaddr *phys_ptr, int *prot,
18 target_ulong *page_size);
20 /* Definitions for the PMCCNTR and PMCR registers */
26 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
30 /* VFP data registers are always little-endian. */
31 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
33 stfq_le_p(buf, env->vfp.regs[reg]);
36 if (arm_feature(env, ARM_FEATURE_NEON)) {
37 /* Aliases for Q regs. */
40 stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
41 stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
45 switch (reg - nregs) {
46 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
47 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
48 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
53 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
57 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
59 env->vfp.regs[reg] = ldfq_le_p(buf);
62 if (arm_feature(env, ARM_FEATURE_NEON)) {
65 env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
66 env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
70 switch (reg - nregs) {
71 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
72 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
73 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
78 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
82 /* 128 bit FP register */
83 stfq_le_p(buf, env->vfp.regs[reg * 2]);
84 stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
88 stl_p(buf, vfp_get_fpsr(env));
92 stl_p(buf, vfp_get_fpcr(env));
99 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
103 /* 128 bit FP register */
104 env->vfp.regs[reg * 2] = ldfq_le_p(buf);
105 env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
109 vfp_set_fpsr(env, ldl_p(buf));
113 vfp_set_fpcr(env, ldl_p(buf));
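/* raw_read/raw_write: access the register's backing field in CPUARMState
 * directly (via ri->fieldoffset), with no side effects, handling both
 * 32-bit and 64-bit fields.
 */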
120 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
122 if (cpreg_field_is_64bit(ri)) {
123 return CPREG_FIELD64(env, ri);
125 return CPREG_FIELD32(env, ri);
129 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
132 if (cpreg_field_is_64bit(ri)) {
133 CPREG_FIELD64(env, ri) = value;
135 CPREG_FIELD32(env, ri) = value;
139 static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
141 /* Raw read of a coprocessor register (as needed for migration, etc). */
142 if (ri->type & ARM_CP_CONST) {
143 return ri->resetvalue;
144 } else if (ri->raw_readfn) {
145 return ri->raw_readfn(env, ri);
146 } else if (ri->readfn) {
147 return ri->readfn(env, ri);
149 return raw_read(env, ri);
153 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
156 /* Raw write of a coprocessor register (as needed for migration, etc).
157 * Note that constant registers are treated as write-ignored; the
158 * caller should check for success by whether a readback gives the
161 if (ri->type & ARM_CP_CONST) {
163 } else if (ri->raw_writefn) {
164 ri->raw_writefn(env, ri, v);
165 } else if (ri->writefn) {
166 ri->writefn(env, ri, v);
168 raw_write(env, ri, v);
172 bool write_cpustate_to_list(ARMCPU *cpu)
174 /* Write the coprocessor state from cpu->env to the (index,value) list. */
178 for (i = 0; i < cpu->cpreg_array_len; i++) {
179 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
180 const ARMCPRegInfo *ri;
182 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
187 if (ri->type & ARM_CP_NO_MIGRATE) {
190 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
195 bool write_list_to_cpustate(ARMCPU *cpu)
200 for (i = 0; i < cpu->cpreg_array_len; i++) {
201 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
202 uint64_t v = cpu->cpreg_values[i];
203 const ARMCPRegInfo *ri;
205 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
210 if (ri->type & ARM_CP_NO_MIGRATE) {
213 /* Write value and confirm it reads back as written
214 * (to catch read-only registers and partially read-only
215 * registers where the incoming migration value doesn't match)
217 write_raw_cp_reg(&cpu->env, ri, v);
218 if (read_raw_cp_reg(&cpu->env, ri) != v) {
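/* g_list_foreach callback: record the KVM-style index of each migratable
 * register in cpreg_indexes[].
 */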
225 static void add_cpreg_to_list(gpointer key, gpointer opaque)
227 ARMCPU *cpu = opaque;
229 const ARMCPRegInfo *ri;
231 regidx = *(uint32_t *)key;
232 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
234 if (!(ri->type & ARM_CP_NO_MIGRATE)) {
235 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
236 /* The value array need not be initialized at this point */
237 cpu->cpreg_array_len++;
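/* g_list_foreach callback: count the registers that will be migrated. */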
241 static void count_cpreg(gpointer key, gpointer opaque)
243 ARMCPU *cpu = opaque;
245 const ARMCPRegInfo *ri;
247 regidx = *(uint32_t *)key;
248 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
250 if (!(ri->type & ARM_CP_NO_MIGRATE)) {
251 cpu->cpreg_array_len++;
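/* GCompareFunc: order register keys by their 64-bit KVM-style ID so the
 * resulting list is sorted.
 */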
255 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
257 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
258 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
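/* g_hash_table_foreach callback: collect every register key from cp_regs
 * into a GList.
 */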
269 static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
271 GList **plist = udata;
273 *plist = g_list_prepend(*plist, key);
276 void init_cpreg_list(ARMCPU *cpu)
278 /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
279 * Note that we require cpreg_tuples[] to be sorted by key ID.
284 g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);
286 keys = g_list_sort(keys, cpreg_key_compare);
288 cpu->cpreg_array_len = 0;
290 g_list_foreach(keys, count_cpreg, cpu);
292 arraylen = cpu->cpreg_array_len;
293 cpu->cpreg_indexes = g_new(uint64_t, arraylen);
294 cpu->cpreg_values = g_new(uint64_t, arraylen);
295 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
296 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
297 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
298 cpu->cpreg_array_len = 0;
300 g_list_foreach(keys, add_cpreg_to_list, cpu);
302 assert(cpu->cpreg_array_len == arraylen);
307 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
309 ARMCPU *cpu = arm_env_get_cpu(env);
311 env->cp15.c3 = value;
312 tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
315 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
317 ARMCPU *cpu = arm_env_get_cpu(env);
319 if (env->cp15.c13_fcse != value) {
320 /* Unlike real hardware the qemu TLB uses virtual addresses,
321 * not modified virtual addresses, so this causes a TLB flush.
323 tlb_flush(CPU(cpu), 1);
324 env->cp15.c13_fcse = value;
328 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
331 ARMCPU *cpu = arm_env_get_cpu(env);
333 if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
334 /* For VMSA (when not using the LPAE long descriptor page table
335 * format) this register includes the ASID, so do a TLB flush.
336 * For PMSA it is purely a process ID and no action is needed.
338 tlb_flush(CPU(cpu), 1);
340 env->cp15.c13_context = value;
343 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
346 /* Invalidate all (TLBIALL) */
347 ARMCPU *cpu = arm_env_get_cpu(env);
349 tlb_flush(CPU(cpu), 1);
352 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
355 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
356 ARMCPU *cpu = arm_env_get_cpu(env);
358 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
361 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
364 /* Invalidate by ASID (TLBIASID) */
365 ARMCPU *cpu = arm_env_get_cpu(env);
367 tlb_flush(CPU(cpu), value == 0);
370 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
373 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
374 ARMCPU *cpu = arm_env_get_cpu(env);
376 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
379 static const ARMCPRegInfo cp_reginfo[] = {
380 /* DBGDIDR: just RAZ. In particular this means the "debug architecture
381 * version" bits will read as a reserved value, which should cause
382 * Linux to not try to use the debug hardware.
384 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
385 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
386 /* MMU Domain access control / MPU write buffer control */
387 { .name = "DACR", .cp = 15,
388 .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
389 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
390 .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
391 { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
392 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
393 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
394 { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
395 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
396 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
397 /* ??? This covers not just the impdef TLB lockdown registers but also
398 * some v7VMSA registers relating to TEX remap, so it is overly broad.
400 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
401 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
402 /* MMU TLB control. Note that the wildcarding means we cover not just
403 * the unified TLB ops but also the dside/iside/inner-shareable variants.
405 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
406 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
407 .type = ARM_CP_NO_MIGRATE },
408 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
409 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
410 .type = ARM_CP_NO_MIGRATE },
411 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
412 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
413 .type = ARM_CP_NO_MIGRATE },
414 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
415 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
416 .type = ARM_CP_NO_MIGRATE },
417 /* Cache maintenance ops; some of this space may be overridden later. */
418 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
419 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
420 .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
424 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
425 /* Not all pre-v6 cores implemented this WFI, so this is slightly
428 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
429 .access = PL1_W, .type = ARM_CP_WFI },
433 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
434 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
435 * is UNPREDICTABLE; we choose to NOP as most implementations do).
437 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
438 .access = PL1_W, .type = ARM_CP_WFI },
439 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
440 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
441 * OMAPCP will override this space.
443 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
444 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
446 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
447 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
449 /* v6 doesn't have the cache ID registers but Linux reads them anyway */
450 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
451 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
456 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
459 if (env->cp15.c1_coproc != value) {
460 env->cp15.c1_coproc = value;
461 /* ??? Is this safe when called from within a TB? */
466 static const ARMCPRegInfo v6_cp_reginfo[] = {
467 /* prefetch by MVA in v6, NOP in v7 */
468 { .name = "MVA_prefetch",
469 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
470 .access = PL1_W, .type = ARM_CP_NOP },
471 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
472 .access = PL0_W, .type = ARM_CP_NOP },
473 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
474 .access = PL0_W, .type = ARM_CP_NOP },
475 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
476 .access = PL0_W, .type = ARM_CP_NOP },
477 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
479 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el1),
481 /* Watchpoint Fault Address Register: should actually only be present
482 * for 1136, 1176, 11MPCore.
484 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
485 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
486 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
487 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
488 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
489 .resetvalue = 0, .writefn = cpacr_write },
493 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
495 /* Performance monitor registers user accessibility is controlled
498 if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
499 return CP_ACCESS_TRAP;
504 #ifndef CONFIG_USER_ONLY
505 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
508 /* Don't compute the number of ticks in user mode */
511 temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
512 get_ticks_per_sec() / 1000000;
514 if (env->cp15.c9_pmcr & PMCRE) {
515 /* If the counter is enabled */
516 if (env->cp15.c9_pmcr & PMCRD) {
517 /* Increment once every 64 processor clock cycles */
518 env->cp15.c15_ccnt = (temp_ticks / 64) - env->cp15.c15_ccnt;
520 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
525 /* The counter has been reset */
526 env->cp15.c15_ccnt = 0;
529 /* only the DP, X, D and E bits are writable */
530 env->cp15.c9_pmcr &= ~0x39;
531 env->cp15.c9_pmcr |= (value & 0x39);
533 if (env->cp15.c9_pmcr & PMCRE) {
534 if (env->cp15.c9_pmcr & PMCRD) {
535 /* Increment once every 64 processor clock cycles */
538 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
542 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
544 uint32_t total_ticks;
546 if (!(env->cp15.c9_pmcr & PMCRE)) {
547 /* Counter is disabled, do not change value */
548 return env->cp15.c15_ccnt;
551 total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
552 get_ticks_per_sec() / 1000000;
554 if (env->cp15.c9_pmcr & PMCRD) {
555 /* Increment once every 64 processor clock cycles */
558 return total_ticks - env->cp15.c15_ccnt;
561 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
564 uint32_t total_ticks;
566 if (!(env->cp15.c9_pmcr & PMCRE)) {
567 /* Counter is disabled, set the absolute value */
568 env->cp15.c15_ccnt = value;
572 total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
573 get_ticks_per_sec() / 1000000;
575 if (env->cp15.c9_pmcr & PMCRD) {
576 /* Increment once every 64 processor clock cycles */
579 env->cp15.c15_ccnt = total_ticks - value;
583 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
587 env->cp15.c9_pmcnten |= value;
590 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
594 env->cp15.c9_pmcnten &= ~value;
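/* PMOVSR is write-one-to-clear: clear the overflow flags selected by the
 * written value.
 */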
597 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
600 env->cp15.c9_pmovsr &= ~value;
603 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
606 env->cp15.c9_pmxevtyper = value & 0xff;
609 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
612 env->cp15.c9_pmuserenr = value & 1;
615 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
618 /* We have no event counters so only the C bit can be changed */
620 env->cp15.c9_pminten |= value;
623 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
627 env->cp15.c9_pminten &= ~value;
630 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
633 /* Note that even though the AArch64 view of this register has bits
634 * [10:0] all RES0 we can only mask the bottom 5, to comply with the
635 * architectural requirements for bits which are RES0 only in some
636 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
637 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
639 env->cp15.c12_vbar = value & ~0x1Ful;
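/* CCSIDR reads return the ID data for the cache currently selected by
 * CSSELR (c0_cssel).
 */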
642 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
644 ARMCPU *cpu = arm_env_get_cpu(env);
645 return cpu->ccsidr[env->cp15.c0_cssel];
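/* CSSELR: only the low four bits (cache level and InD) are stored. */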
648 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
651 env->cp15.c0_cssel = value & 0xf;
654 static const ARMCPRegInfo v7_cp_reginfo[] = {
655 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
658 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
659 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
660 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
661 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
662 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
663 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
664 .access = PL1_W, .type = ARM_CP_NOP },
665 /* Performance monitors are implementation defined in v7,
666 * but with an ARM recommended set of registers, which we
667 * follow (although we don't actually implement any counters)
669 * Performance registers fall into three categories:
670 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
671 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
672 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
673 * For the cases controlled by PMUSERENR we must set .access to PL0_RW
674 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
676 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
677 .access = PL0_RW, .resetvalue = 0,
678 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
679 .writefn = pmcntenset_write,
680 .accessfn = pmreg_access,
681 .raw_writefn = raw_write },
682 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
683 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
684 .accessfn = pmreg_access,
685 .writefn = pmcntenclr_write,
686 .type = ARM_CP_NO_MIGRATE },
687 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
688 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
689 .accessfn = pmreg_access,
690 .writefn = pmovsr_write,
691 .raw_writefn = raw_write },
692 /* Unimplemented so WI. */
693 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
694 .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
695 /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
696 * We choose to RAZ/WI.
698 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
699 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
700 .accessfn = pmreg_access },
701 #ifndef CONFIG_USER_ONLY
702 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
703 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
704 .readfn = pmccntr_read, .writefn = pmccntr_write,
705 .accessfn = pmreg_access },
707 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
709 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
710 .accessfn = pmreg_access, .writefn = pmxevtyper_write,
711 .raw_writefn = raw_write },
712 /* Unimplemented, RAZ/WI. */
713 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
714 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
715 .accessfn = pmreg_access },
716 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
717 .access = PL0_R | PL1_RW,
718 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
720 .writefn = pmuserenr_write, .raw_writefn = raw_write },
721 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
723 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
725 .writefn = pmintenset_write, .raw_writefn = raw_write },
726 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
727 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
728 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
729 .resetvalue = 0, .writefn = pmintenclr_write, },
730 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
731 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
732 .access = PL1_RW, .writefn = vbar_write,
733 .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
735 { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
736 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
738 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
739 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
740 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
741 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
742 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
743 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
744 .writefn = csselr_write, .resetvalue = 0 },
745 /* Auxiliary ID register: this actually has an IMPDEF value but for now
746 * just RAZ for all cores:
748 { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
749 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
750 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
751 /* MAIR can just read-as-written because we don't implement caches
752 * and so don't need to care about memory attributes.
754 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
755 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
756 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
758 /* For non-long-descriptor page tables these are PRRR and NMRR;
759 * regardless they still act as reads-as-written for QEMU.
760 * The override is necessary because of the overly-broad TLB_LOCKDOWN
763 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
764 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
765 .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
766 .resetfn = arm_cp_reset_ignore },
767 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
768 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
769 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
770 .resetfn = arm_cp_reset_ignore },
774 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
781 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
783 if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
784 return CP_ACCESS_TRAP;
789 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
790 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
791 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
793 .writefn = teecr_write },
794 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
795 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
796 .accessfn = teehbr_access, .resetvalue = 0 },
800 static const ARMCPRegInfo v6k_cp_reginfo[] = {
801 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
802 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
804 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
805 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
807 .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
808 .resetfn = arm_cp_reset_ignore },
809 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
810 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
811 .access = PL0_R|PL1_W,
812 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
813 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
814 .access = PL0_R|PL1_W,
815 .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
816 .resetfn = arm_cp_reset_ignore },
817 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
818 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
820 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
824 #ifndef CONFIG_USER_ONLY
826 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
828 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
829 if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
830 return CP_ACCESS_TRAP;
835 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
837 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
838 if (arm_current_pl(env) == 0 &&
839 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
840 return CP_ACCESS_TRAP;
845 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
847 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
848 * EL0[PV]TEN is zero.
850 if (arm_current_pl(env) == 0 &&
851 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
852 return CP_ACCESS_TRAP;
857 static CPAccessResult gt_pct_access(CPUARMState *env,
858 const ARMCPRegInfo *ri)
860 return gt_counter_access(env, GTIMER_PHYS);
863 static CPAccessResult gt_vct_access(CPUARMState *env,
864 const ARMCPRegInfo *ri)
866 return gt_counter_access(env, GTIMER_VIRT);
869 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
871 return gt_timer_access(env, GTIMER_PHYS);
874 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
876 return gt_timer_access(env, GTIMER_VIRT);
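/* Current system counter value: QEMU_CLOCK_VIRTUAL nanoseconds scaled
 * down by GTIMER_SCALE.
 */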
879 static uint64_t gt_get_countervalue(CPUARMState *env)
881 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
884 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
886 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
889 /* Timer enabled: calculate and set current ISTATUS, irq, and
890 * reset timer to when ISTATUS next has to change
892 uint64_t count = gt_get_countervalue(&cpu->env);
893 /* Note that this must be unsigned 64 bit arithmetic: */
894 int istatus = count >= gt->cval;
897 gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
898 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
899 (istatus && !(gt->ctl & 2)));
901 /* Next transition is when count rolls back over to zero */
902 nexttick = UINT64_MAX;
904 /* Next transition is when we hit cval */
907 /* Note that the desired next expiry time might be beyond the
908 * signed-64-bit range of a QEMUTimer -- in this case we just
909 * set the timer for as far in the future as possible. When the
910 * timer expires we will reset the timer for any remaining period.
912 if (nexttick > INT64_MAX / GTIMER_SCALE) {
913 nexttick = INT64_MAX / GTIMER_SCALE;
915 timer_mod(cpu->gt_timer[timeridx], nexttick);
917 /* Timer disabled: ISTATUS and timer output always clear */
919 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
920 timer_del(cpu->gt_timer[timeridx]);
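/* Reset handler for the counter views: just cancel any pending timer. */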
924 static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
926 ARMCPU *cpu = arm_env_get_cpu(env);
927 int timeridx = ri->opc1 & 1;
929 timer_del(cpu->gt_timer[timeridx]);
932 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
934 return gt_get_countervalue(env);
937 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
940 int timeridx = ri->opc1 & 1;
942 env->cp15.c14_timer[timeridx].cval = value;
943 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
946 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
948 int timeridx = ri->crm & 1;
950 return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
951 gt_get_countervalue(env));
954 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
957 int timeridx = ri->crm & 1;
959 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
960 sextract64(value, 0, 32);
961 gt_recalc_timer(arm_env_get_cpu(env), timeridx);
964 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
967 ARMCPU *cpu = arm_env_get_cpu(env);
968 int timeridx = ri->crm & 1;
969 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
971 env->cp15.c14_timer[timeridx].ctl = value & 3;
972 if ((oldval ^ value) & 1) {
974 gt_recalc_timer(cpu, timeridx);
975 } else if ((oldval & value) & 2) {
976 /* IMASK toggled: don't need to recalculate,
977 * just set the interrupt line based on ISTATUS
979 qemu_set_irq(cpu->gt_timer_outputs[timeridx],
980 (oldval & 4) && (value & 2));
984 void arm_gt_ptimer_cb(void *opaque)
986 ARMCPU *cpu = opaque;
988 gt_recalc_timer(cpu, GTIMER_PHYS);
991 void arm_gt_vtimer_cb(void *opaque)
993 ARMCPU *cpu = opaque;
995 gt_recalc_timer(cpu, GTIMER_VIRT);
998 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
999 /* Note that CNTFRQ is purely reads-as-written for the benefit
1000 * of software; writing it doesn't actually change the timer frequency.
1001 * Our reset value matches the fixed frequency we implement the timer at.
1003 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1004 .type = ARM_CP_NO_MIGRATE,
1005 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1006 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1007 .resetfn = arm_cp_reset_ignore,
1009 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1010 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1011 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1012 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1013 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1015 /* overall control: mostly access permissions */
1016 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1017 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1019 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1022 /* per-timer control */
1023 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1024 .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
1025 .accessfn = gt_ptimer_access,
1026 .fieldoffset = offsetoflow32(CPUARMState,
1027 cp15.c14_timer[GTIMER_PHYS].ctl),
1028 .resetfn = arm_cp_reset_ignore,
1029 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1031 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1032 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1033 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1034 .accessfn = gt_ptimer_access,
1035 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1037 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1039 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1040 .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
1041 .accessfn = gt_vtimer_access,
1042 .fieldoffset = offsetoflow32(CPUARMState,
1043 cp15.c14_timer[GTIMER_VIRT].ctl),
1044 .resetfn = arm_cp_reset_ignore,
1045 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1047 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1048 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1049 .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1050 .accessfn = gt_vtimer_access,
1051 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1053 .writefn = gt_ctl_write, .raw_writefn = raw_write,
1055 /* TimerValue views: a 32 bit downcounting view of the underlying state */
1056 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1057 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
1058 .accessfn = gt_ptimer_access,
1059 .readfn = gt_tval_read, .writefn = gt_tval_write,
1061 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1062 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1063 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
1064 .readfn = gt_tval_read, .writefn = gt_tval_write,
1066 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1067 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
1068 .accessfn = gt_vtimer_access,
1069 .readfn = gt_tval_read, .writefn = gt_tval_write,
1071 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1072 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1073 .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
1074 .readfn = gt_tval_read, .writefn = gt_tval_write,
1076 /* The counter itself */
1077 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1078 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
1079 .accessfn = gt_pct_access,
1080 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1082 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1083 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1084 .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
1085 .accessfn = gt_pct_access,
1086 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
1088 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1089 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
1090 .accessfn = gt_vct_access,
1091 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1093 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1094 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1095 .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
1096 .accessfn = gt_vct_access,
1097 .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
1099 /* Comparison value, indicating when the timer goes off */
1100 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1101 .access = PL1_RW | PL0_R,
1102 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
1103 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1104 .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
1105 .writefn = gt_cval_write, .raw_writefn = raw_write,
1107 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1108 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1109 .access = PL1_RW | PL0_R,
1111 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1112 .resetvalue = 0, .accessfn = gt_vtimer_access,
1113 .writefn = gt_cval_write, .raw_writefn = raw_write,
1115 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1116 .access = PL1_RW | PL0_R,
1117 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
1118 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1119 .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
1120 .writefn = gt_cval_write, .raw_writefn = raw_write,
1122 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1123 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1124 .access = PL1_RW | PL0_R,
1126 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1127 .resetvalue = 0, .accessfn = gt_vtimer_access,
1128 .writefn = gt_cval_write, .raw_writefn = raw_write,
1134 /* In user-mode none of the generic timer registers are accessible,
1135 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
1136 * so instead just don't register any of them.
1138 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1144 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1146 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1147 env->cp15.c7_par = value;
1148 } else if (arm_feature(env, ARM_FEATURE_V7)) {
1149 env->cp15.c7_par = value & 0xfffff6ff;
1151 env->cp15.c7_par = value & 0xfffff1ff;
1155 #ifndef CONFIG_USER_ONLY
1156 /* get_phys_addr() isn't present for user-mode-only targets */
1158 /* Return true if extended addresses are enabled.
1159 * This is always the case if our translation regime is 64 bit,
1160 * but depends on TTBCR.EAE for 32 bit.
1162 static inline bool extended_addresses_enabled(CPUARMState *env)
1164 return arm_el_is_aa64(env, 1)
1165 || ((arm_feature(env, ARM_FEATURE_LPAE)
1166 && (env->cp15.c2_control & (1U << 31))));
1169 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
1172 /* Other states are only available with TrustZone; in
1173 * a non-TZ implementation these registers don't exist
1174 * at all, which is an Uncategorized trap. This underdecoding
1175 * is safe because the reginfo is NO_MIGRATE.
1177 return CP_ACCESS_TRAP_UNCATEGORIZED;
1179 return CP_ACCESS_OK;
1182 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1185 target_ulong page_size;
1187 int ret, is_user = ri->opc2 & 2;
1188 int access_type = ri->opc2 & 1;
1190 ret = get_phys_addr(env, value, access_type, is_user,
1191 &phys_addr, &prot, &page_size);
1192 if (extended_addresses_enabled(env)) {
1193 /* ret is a DFSR/IFSR value for the long descriptor
1194 * translation table format, but with WnR always clear.
1195 * Convert it to a 64-bit PAR.
1197 uint64_t par64 = (1 << 11); /* LPAE bit always set */
1199 par64 |= phys_addr & ~0xfffULL;
1200 /* We don't set the ATTR or SH fields in the PAR. */
1203 par64 |= (ret & 0x3f) << 1; /* FS */
1204 /* Note that S2WLK and FSTAGE are always zero, because we don't
1205 * implement virtualization and therefore there can't be a stage 2
1209 env->cp15.c7_par = par64;
1210 env->cp15.c7_par_hi = par64 >> 32;
1212 /* ret is a DFSR/IFSR value for the short descriptor
1213 * translation table format (with WnR always clear).
1214 * Convert it to a 32-bit PAR.
1217 /* We do not set any attribute bits in the PAR */
1218 if (page_size == (1 << 24)
1219 && arm_feature(env, ARM_FEATURE_V7)) {
1220 env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
1222 env->cp15.c7_par = phys_addr & 0xfffff000;
1225 env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
1226 ((ret & (1 << 12)) >> 6) |
1227 ((ret & 0xf) << 1) | 1;
1229 env->cp15.c7_par_hi = 0;
1234 static const ARMCPRegInfo vapa_cp_reginfo[] = {
1235 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
1236 .access = PL1_RW, .resetvalue = 0,
1237 .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
1238 .writefn = par_write },
1239 #ifndef CONFIG_USER_ONLY
1240 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
1241 .access = PL1_W, .accessfn = ats_access,
1242 .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
1247 /* Return basic MPU access permission bits. */
1248 static uint32_t simple_mpu_ap_bits(uint32_t val)
1255 for (i = 0; i < 16; i += 2) {
1256 ret |= (val >> i) & mask;
1262 /* Pad basic MPU access permission bits to extended format. */
1263 static uint32_t extended_mpu_ap_bits(uint32_t val)
1270 for (i = 0; i < 16; i += 2) {
1271 ret |= (val & mask) << i;
1277 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1280 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
1283 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1285 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
1288 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
1291 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
1294 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
1296 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
1299 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
1300 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
1301 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
1302 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
1304 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
1305 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
1306 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
1307 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
1309 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
1310 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
1312 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
1314 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
1316 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
1318 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
1320 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
1321 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
1323 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
1324 /* Protection region base and size registers */
1325 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
1326 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1327 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
1328 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
1329 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1330 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
1331 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
1332 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1333 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
1334 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
1335 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1336 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
1337 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
1338 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1339 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
1340 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
1341 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1342 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
1343 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
1344 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1345 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
1346 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
1347 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
1348 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
1352 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
1355 int maskshift = extract32(value, 0, 3);
1357 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
1358 value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
1362 /* Note that we always calculate c2_mask and c2_base_mask, but
1363 * they are only used for short-descriptor tables (ie if EAE is 0);
1364 * for long-descriptor tables the TTBCR fields are used differently
1365 * and the c2_mask and c2_base_mask values are meaningless.
1367 env->cp15.c2_control = value;
1368 env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
1369 env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
1372 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1375 ARMCPU *cpu = arm_env_get_cpu(env);
1377 if (arm_feature(env, ARM_FEATURE_LPAE)) {
1378 /* With LPAE the TTBCR could result in a change of ASID
1379 * via the TTBCR.A1 bit, so do a TLB flush.
1381 tlb_flush(CPU(cpu), 1);
1383 vmsa_ttbcr_raw_write(env, ri, value);
1386 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1388 env->cp15.c2_base_mask = 0xffffc000u;
1389 env->cp15.c2_control = 0;
1390 env->cp15.c2_mask = 0;
1393 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
1396 ARMCPU *cpu = arm_env_get_cpu(env);
1398 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
1399 tlb_flush(CPU(cpu), 1);
1400 env->cp15.c2_control = value;
1403 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1406 /* 64 bit accesses to the TTBRs can change the ASID and so we
1407 * must flush the TLB.
1409 if (cpreg_field_is_64bit(ri)) {
1410 ARMCPU *cpu = arm_env_get_cpu(env);
1412 tlb_flush(CPU(cpu), 1);
1414 raw_write(env, ri, value);
1417 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
1418 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
1419 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
1420 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
1421 .resetfn = arm_cp_reset_ignore, },
1422 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
1424 .fieldoffset = offsetof(CPUARMState, cp15.ifsr_el2), .resetvalue = 0, },
1425 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
1426 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
1428 .fieldoffset = offsetof(CPUARMState, cp15.esr_el1), .resetvalue = 0, },
1429 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
1430 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
1431 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
1432 .writefn = vmsa_ttbr_write, .resetvalue = 0 },
1433 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
1434 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
1435 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
1436 .writefn = vmsa_ttbr_write, .resetvalue = 0 },
1437 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
1438 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
1439 .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
1440 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
1441 .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
1442 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
1443 .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
1444 .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
1445 .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
1446 /* 64-bit FAR; this entry also gives us the AArch32 DFAR */
1447 { .name = "FAR_EL1", .state = ARM_CP_STATE_BOTH,
1448 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
1449 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el1),
1454 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
1457 env->cp15.c15_ticonfig = value & 0xe7;
1458 /* The OS_TYPE bit in this register changes the reported CPUID! */
1459 env->cp15.c0_cpuid = (value & (1 << 5)) ?
1460 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
1463 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1466 env->cp15.c15_threadid = value & 0xffff;
1469 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
1472 /* Wait-for-interrupt (deprecated) */
1473 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
1476 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
1479 /* On OMAP there are registers indicating the max/min index of dcache lines
1480 * containing a dirty line; cache flush operations have to reset these.
1482 env->cp15.c15_i_max = 0x000;
1483 env->cp15.c15_i_min = 0xff0;
1486 static const ARMCPRegInfo omap_cp_reginfo[] = {
1487 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
1488 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
1489 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el1),
1491 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
1492 .access = PL1_RW, .type = ARM_CP_NOP },
1493 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
1495 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
1496 .writefn = omap_ticonfig_write },
1497 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
1499 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
1500 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
1501 .access = PL1_RW, .resetvalue = 0xff0,
1502 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
1503 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
1505 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
1506 .writefn = omap_threadid_write },
1507 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
1508 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
1509 .type = ARM_CP_NO_MIGRATE,
1510 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
1511 /* TODO: Peripheral port remap register:
1512 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
1513 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
1516 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
1517 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
1518 .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
1519 .writefn = omap_cachemaint_write },
1520 { .name = "C9", .cp = 15, .crn = 9,
1521 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
1522 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
1526 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1530 if (env->cp15.c15_cpar != value) {
1531 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
1533 env->cp15.c15_cpar = value;
1537 static const ARMCPRegInfo xscale_cp_reginfo[] = {
1538 { .name = "XSCALE_CPAR",
1539 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
1540 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
1541 .writefn = xscale_cpar_write, },
1542 { .name = "XSCALE_AUXCR",
1543 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
1544 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
1549 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
1550 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
1551 * implementation of this implementation-defined space.
1552 * Ideally this should eventually disappear in favour of actually
1553 * implementing the correct behaviour for all cores.
1555 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
1556 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
1558 .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
1563 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
1564 /* Cache status: RAZ because we have no cache so it's always clean */
1565 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
1566 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1571 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
1572 /* We never have a block transfer operation in progress */
1573 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
1574 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1576 /* The cache ops themselves: these all NOP for QEMU */
1577 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
1578 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1579 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
1580 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1581 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
1582 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1583 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
1584 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1585 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
1586 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1587 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
1588 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
1592 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
1593 /* The cache test-and-clean instructions always return (1 << 30)
1594 * to indicate that there are no dirty cache lines.
1596 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
1597 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1598 .resetvalue = (1 << 30) },
1599 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
1600 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
1601 .resetvalue = (1 << 30) },
1605 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
1606 /* Ignore ReadBuffer accesses */
1607 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
1608 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
1609 .access = PL1_RW, .resetvalue = 0,
1610 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
1614 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1616 CPUState *cs = CPU(arm_env_get_cpu(env));
1617 uint32_t mpidr = cs->cpu_index;
1618 /* We don't support setting cluster ID ([8..11]) (known as Aff1
1619 * in later ARM ARM versions), or any of the higher affinity level fields,
1620 * so these bits always RAZ.
1622 if (arm_feature(env, ARM_FEATURE_V7MP)) {
1623 mpidr |= (1U << 31);
1624 /* Cores which are uniprocessor (non-coherent)
1625 * but still implement the MP extensions set
1626 * bit 30. (For instance, A9UP.) However we do
1627 * not currently model any of those cores.
1633 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
1634 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
1635 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
1636 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
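/* 64-bit (LPAE) view of the PAR: reads and writes combine/split the two
 * 32-bit halves kept in c7_par and c7_par_hi.
 */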
1640 static uint64_t par64_read(CPUARMState *env, const ARMCPRegInfo *ri)
1642 return ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
1645 static void par64_write(CPUARMState *env, const ARMCPRegInfo *ri,
1648 env->cp15.c7_par_hi = value >> 32;
1649 env->cp15.c7_par = value;
1652 static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1654 env->cp15.c7_par_hi = 0;
1655 env->cp15.c7_par = 0;
1658 static const ARMCPRegInfo lpae_cp_reginfo[] = {
1659 /* NOP AMAIR0/1: the override is because these clash with the rather
1660 * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
1662 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
1663 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
1664 .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
1666 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
1667 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
1668 .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
1670 /* 64 bit access versions of the (dummy) debug registers */
1671 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
1672 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
1673 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
1674 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
1675 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
1676 .access = PL1_RW, .type = ARM_CP_64BIT,
1677 .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
1678 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
1679 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
1680 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
1681 .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
1682 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
1683 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
1684 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
1685 .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
1689 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1691 return vfp_get_fpcr(env);
1694 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1697 vfp_set_fpcr(env, value);
1700 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1702 return vfp_get_fpsr(env);
1705 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1708 vfp_set_fpsr(env, value);
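/* MSR/MRS of DAIF from EL0 traps unless SCTLR_EL1.UMA is set. */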
1711 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
1713 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
1714 return CP_ACCESS_TRAP;
1716 return CP_ACCESS_OK;
1719 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
1722 env->daif = value & PSTATE_DAIF;
1725 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
1726 const ARMCPRegInfo *ri)
1728 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
1729 * SCTLR_EL1.UCI is set.
1731 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
1732 return CP_ACCESS_TRAP;
1734 return CP_ACCESS_OK;
1737 static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
1740 /* Invalidate by VA (AArch64 version) */
1741 ARMCPU *cpu = arm_env_get_cpu(env);
1742 uint64_t pageaddr = value << 12;
1743 tlb_flush_page(CPU(cpu), pageaddr);
1746 static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
1749 /* Invalidate by VA, all ASIDs (AArch64 version) */
1750 ARMCPU *cpu = arm_env_get_cpu(env);
1751 uint64_t pageaddr = value << 12;
1752 tlb_flush_page(CPU(cpu), pageaddr);
1755 static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
1758 /* Invalidate by ASID (AArch64 version) */
1759 ARMCPU *cpu = arm_env_get_cpu(env);
1760 int asid = extract64(value, 48, 16);
1761 tlb_flush(CPU(cpu), asid == 0);
1764 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
1766 /* We don't implement EL2, so the only control on DC ZVA is the
1767 * bit in the SCTLR which can prohibit access for EL0.
1769 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_DZE)) {
1770 return CP_ACCESS_TRAP;
1772 return CP_ACCESS_OK;
1775 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
1777 ARMCPU *cpu = arm_env_get_cpu(env);
1778 int dzp_bit = 1 << 4;
1780 /* DZP indicates whether DC ZVA access is allowed */
1781 if (aa64_zva_access(env, NULL) != CP_ACCESS_OK) {
1784 return cpu->dcz_blocksize | dzp_bit;
1787 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
1789 if (!(env->pstate & PSTATE_SP)) {
1790 /* Access to SP_EL0 is undefined if it's being used as
1791 * the stack pointer.
1793 return CP_ACCESS_TRAP_UNCATEGORIZED;
1795 return CP_ACCESS_OK;
1798 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
1800 return env->pstate & PSTATE_SP;
1803 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
1805 update_spsel(env, val);
1808 static const ARMCPRegInfo v8_cp_reginfo[] = {
1809 /* Minimal set of EL0-visible registers. This will need to be expanded
1810 * significantly for system emulation of AArch64 CPUs.
1812 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
1813 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
1814 .access = PL0_RW, .type = ARM_CP_NZCV },
1815 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
1816 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
1817 .type = ARM_CP_NO_MIGRATE,
1818 .access = PL0_RW, .accessfn = aa64_daif_access,
1819 .fieldoffset = offsetof(CPUARMState, daif),
1820 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
1821 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
1822 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
1823 .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
1824 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
1825 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
1826 .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
1827 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
1828 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
1829 .access = PL0_R, .type = ARM_CP_NO_MIGRATE,
1830 .readfn = aa64_dczid_read },
1831 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
1832 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
1833 .access = PL0_W, .type = ARM_CP_DC_ZVA,
1834 #ifndef CONFIG_USER_ONLY
1835 /* Avoid overhead of an access check that always passes in user-mode */
1836 .accessfn = aa64_zva_access,
1839 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
1840 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
1841 .access = PL1_R, .type = ARM_CP_CURRENTEL },
1842 /* Cache ops: all NOPs since we don't emulate caches */
1843 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
1844 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
1845 .access = PL1_W, .type = ARM_CP_NOP },
1846 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
1847 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
1848 .access = PL1_W, .type = ARM_CP_NOP },
1849 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
1850 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
1851 .access = PL0_W, .type = ARM_CP_NOP,
1852 .accessfn = aa64_cacheop_access },
1853 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
1854 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
1855 .access = PL1_W, .type = ARM_CP_NOP },
1856 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
1857 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
1858 .access = PL1_W, .type = ARM_CP_NOP },
1859 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
1860 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
1861 .access = PL0_W, .type = ARM_CP_NOP,
1862 .accessfn = aa64_cacheop_access },
1863 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
1864 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
1865 .access = PL1_W, .type = ARM_CP_NOP },
1866 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
1867 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
1868 .access = PL0_W, .type = ARM_CP_NOP,
1869 .accessfn = aa64_cacheop_access },
1870 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
1871 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
1872 .access = PL0_W, .type = ARM_CP_NOP,
1873 .accessfn = aa64_cacheop_access },
1874 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
1875 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
1876 .access = PL1_W, .type = ARM_CP_NOP },
1877 /* TLBI operations */
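/* Note that the Inner Shareable (*IS) forms below are wired to the same
 * write functions as the local forms: broadcast TLB maintenance to other
 * CPUs is not modelled, so both simply flush this CPU's TLB.
 */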
1878 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
1879 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1880 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1881 .writefn = tlbiall_write },
1882 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
1883 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
1884 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1885 .writefn = tlbi_aa64_va_write },
1886 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
1887 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
1888 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1889 .writefn = tlbi_aa64_asid_write },
1890 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
1891 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
1892 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1893 .writefn = tlbi_aa64_vaa_write },
1894 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
1895 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
1896 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1897 .writefn = tlbi_aa64_va_write },
1898 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
1899 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
1900 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1901 .writefn = tlbi_aa64_vaa_write },
1902 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
1903 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1904 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1905 .writefn = tlbiall_write },
1906 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
1907 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1908 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1909 .writefn = tlbi_aa64_va_write },
1910 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
1911 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1912 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1913 .writefn = tlbi_aa64_asid_write },
1914 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
1915 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1916 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1917 .writefn = tlbi_aa64_vaa_write },
1918 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
1919 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
1920 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1921 .writefn = tlbi_aa64_va_write },
1922 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
1923 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
1924 .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
1925 .writefn = tlbi_aa64_vaa_write },
1926 /* Dummy implementation of monitor debug system control register:
1927 * we don't support debug.
1929 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
1930 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
1931 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1932 /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
1933 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
1934 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
1935 .access = PL1_W, .type = ARM_CP_NOP },
1936 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
1937 .type = ARM_CP_NO_MIGRATE,
1938 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
1939 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, elr_el1) },
1940 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
1941 .type = ARM_CP_NO_MIGRATE,
1942 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
1943 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[0]) },
1944 /* We rely on the access checks not allowing the guest to write to the
1945 * state field when SPSel indicates that it's being used as the stack
1948 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
1949 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
1950 .access = PL1_RW, .accessfn = sp_el0_access,
1951 .type = ARM_CP_NO_MIGRATE,
1952 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
1953 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
1954 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
1955 .type = ARM_CP_NO_MIGRATE,
1956 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
1960 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1963 ARMCPU *cpu = arm_env_get_cpu(env);
1965 env->cp15.c1_sys = value;
1966 /* ??? Lots of these bits are not implemented. */
1967 /* This may enable/disable the MMU, so do a TLB flush. */
1968 tlb_flush(CPU(cpu), 1);
1971 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
1973 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
1974 * but the AArch32 CTR has its own reginfo struct)
1976 if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
1977 return CP_ACCESS_TRAP;
1979 return CP_ACCESS_OK;
1982 static void define_aarch64_debug_regs(ARMCPU *cpu)
1984 /* Define breakpoint and watchpoint registers. These do nothing
1985 * but read as written, for now.
1989 for (i = 0; i < 16; i++) {
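/* 16 is the architectural maximum number of breakpoint/watchpoint register
 * pairs; we define every slot rather than consulting ID_AA64DFR0_EL1.BRPs/WRPs.
 */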
1990 ARMCPRegInfo dbgregs[] = {
1991 { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
1992 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
1994 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
1995 { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
1996 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
1998 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
1999 { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
2000 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
2002 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
2003 { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
2004 .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
2006 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
2009 define_arm_cp_regs(cpu, dbgregs);
2013 void register_cp_regs_for_features(ARMCPU *cpu)
2015 /* Register all the coprocessor registers based on feature bits */
2016 CPUARMState *env = &cpu->env;
2017 if (arm_feature(env, ARM_FEATURE_M)) {
2018 /* M profile has no coprocessor registers */
2022 define_arm_cp_regs(cpu, cp_reginfo);
2023 if (arm_feature(env, ARM_FEATURE_V6)) {
2024 /* The ID registers all have impdef reset values */
2025 ARMCPRegInfo v6_idregs[] = {
2026 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
2027 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
2028 .access = PL1_R, .type = ARM_CP_CONST,
2029 .resetvalue = cpu->id_pfr0 },
2030 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
2031 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
2032 .access = PL1_R, .type = ARM_CP_CONST,
2033 .resetvalue = cpu->id_pfr1 },
2034 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
2035 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
2036 .access = PL1_R, .type = ARM_CP_CONST,
2037 .resetvalue = cpu->id_dfr0 },
2038 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
2039 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
2040 .access = PL1_R, .type = ARM_CP_CONST,
2041 .resetvalue = cpu->id_afr0 },
2042 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
2043 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
2044 .access = PL1_R, .type = ARM_CP_CONST,
2045 .resetvalue = cpu->id_mmfr0 },
2046 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
2047 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
2048 .access = PL1_R, .type = ARM_CP_CONST,
2049 .resetvalue = cpu->id_mmfr1 },
2050 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
2051 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
2052 .access = PL1_R, .type = ARM_CP_CONST,
2053 .resetvalue = cpu->id_mmfr2 },
2054 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
2055 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
2056 .access = PL1_R, .type = ARM_CP_CONST,
2057 .resetvalue = cpu->id_mmfr3 },
2058 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
2059 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
2060 .access = PL1_R, .type = ARM_CP_CONST,
2061 .resetvalue = cpu->id_isar0 },
2062 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
2063 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
2064 .access = PL1_R, .type = ARM_CP_CONST,
2065 .resetvalue = cpu->id_isar1 },
2066 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
2067 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
2068 .access = PL1_R, .type = ARM_CP_CONST,
2069 .resetvalue = cpu->id_isar2 },
2070 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
2071 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
2072 .access = PL1_R, .type = ARM_CP_CONST,
2073 .resetvalue = cpu->id_isar3 },
2074 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
2075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
2076 .access = PL1_R, .type = ARM_CP_CONST,
2077 .resetvalue = cpu->id_isar4 },
2078 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
2079 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
2080 .access = PL1_R, .type = ARM_CP_CONST,
2081 .resetvalue = cpu->id_isar5 },
2082 /* 6..7 are as yet unallocated and must RAZ */
2083 { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
2084 .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
2086 { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
2087 .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
2091 define_arm_cp_regs(cpu, v6_idregs);
2092 define_arm_cp_regs(cpu, v6_cp_reginfo);
2094 define_arm_cp_regs(cpu, not_v6_cp_reginfo);
2096 if (arm_feature(env, ARM_FEATURE_V6K)) {
2097 define_arm_cp_regs(cpu, v6k_cp_reginfo);
2099 if (arm_feature(env, ARM_FEATURE_V7)) {
2100 /* v7 performance monitor control register: same implementor
2101 * field as main ID register, and we implement only the cycle
2104 #ifndef CONFIG_USER_ONLY
2105 ARMCPRegInfo pmcr = {
2106 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
2107 .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
2109 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
2110 .accessfn = pmreg_access, .writefn = pmcr_write,
2111 .raw_writefn = raw_write,
2113 define_one_arm_cp_reg(cpu, &pmcr);
2115 ARMCPRegInfo clidr = {
2116 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
2117 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
2118 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
2120 define_one_arm_cp_reg(cpu, &clidr);
2121 define_arm_cp_regs(cpu, v7_cp_reginfo);
2123 define_arm_cp_regs(cpu, not_v7_cp_reginfo);
2125 if (arm_feature(env, ARM_FEATURE_V8)) {
2126 /* AArch64 ID registers, which all have impdef reset values */
2127 ARMCPRegInfo v8_idregs[] = {
2128 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
2129 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
2130 .access = PL1_R, .type = ARM_CP_CONST,
2131 .resetvalue = cpu->id_aa64pfr0 },
2132 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
2133 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
2134 .access = PL1_R, .type = ARM_CP_CONST,
2135 .resetvalue = cpu->id_aa64pfr1},
2136 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
2137 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
2138 .access = PL1_R, .type = ARM_CP_CONST,
2139 /* We mask out the PMUVer field, because we don't currently
2140 * implement the PMU. Not advertising it prevents the guest
2141 * from trying to use it and getting UNDEFs on registers we
2144 .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
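/* (ID_AA64DFR0_EL1.PMUVer is bits [11:8], hence the ~0xf00 mask above.) */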
2145 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
2146 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
2147 .access = PL1_R, .type = ARM_CP_CONST,
2148 .resetvalue = cpu->id_aa64dfr1 },
2149 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
2150 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
2151 .access = PL1_R, .type = ARM_CP_CONST,
2152 .resetvalue = cpu->id_aa64afr0 },
2153 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
2154 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
2155 .access = PL1_R, .type = ARM_CP_CONST,
2156 .resetvalue = cpu->id_aa64afr1 },
2157 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
2158 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
2159 .access = PL1_R, .type = ARM_CP_CONST,
2160 .resetvalue = cpu->id_aa64isar0 },
2161 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
2162 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
2163 .access = PL1_R, .type = ARM_CP_CONST,
2164 .resetvalue = cpu->id_aa64isar1 },
2165 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
2166 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
2167 .access = PL1_R, .type = ARM_CP_CONST,
2168 .resetvalue = cpu->id_aa64mmfr0 },
2169 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
2170 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
2171 .access = PL1_R, .type = ARM_CP_CONST,
2172 .resetvalue = cpu->id_aa64mmfr1 },
2173 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
2174 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
2175 .access = PL1_R, .type = ARM_CP_CONST,
2176 .resetvalue = cpu->mvfr0 },
2177 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
2178 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
2179 .access = PL1_R, .type = ARM_CP_CONST,
2180 .resetvalue = cpu->mvfr1 },
2181 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
2182 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
2183 .access = PL1_R, .type = ARM_CP_CONST,
2184 .resetvalue = cpu->mvfr2 },
2187 define_arm_cp_regs(cpu, v8_idregs);
2188 define_arm_cp_regs(cpu, v8_cp_reginfo);
2189 define_aarch64_debug_regs(cpu);
2191 if (arm_feature(env, ARM_FEATURE_MPU)) {
2192 /* These are the MPU registers prior to PMSAv6. Any new
2193 * PMSA core later than the ARM946 will require that we
2194 * implement the PMSAv6 or PMSAv7 registers, which are
2195 * completely different.
2197 assert(!arm_feature(env, ARM_FEATURE_V6));
2198 define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
2200 define_arm_cp_regs(cpu, vmsa_cp_reginfo);
2202 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
2203 define_arm_cp_regs(cpu, t2ee_cp_reginfo);
2205 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
2206 define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
2208 if (arm_feature(env, ARM_FEATURE_VAPA)) {
2209 define_arm_cp_regs(cpu, vapa_cp_reginfo);
2211 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
2212 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
2214 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
2215 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
2217 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
2218 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
2220 if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
2221 define_arm_cp_regs(cpu, omap_cp_reginfo);
2223 if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
2224 define_arm_cp_regs(cpu, strongarm_cp_reginfo);
2226 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2227 define_arm_cp_regs(cpu, xscale_cp_reginfo);
2229 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
2230 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
2232 if (arm_feature(env, ARM_FEATURE_LPAE)) {
2233 define_arm_cp_regs(cpu, lpae_cp_reginfo);
2235 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
2236 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
2237 * be read-only (ie write causes UNDEF exception).
2240 ARMCPRegInfo id_cp_reginfo[] = {
2241 /* Note that the MIDR isn't a simple constant register because
2242 * of the TI925 behaviour where writes to another register can
2243 * cause the MIDR value to change.
2245 * Unimplemented registers in the c15 0 0 0 space default to
2246 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
2247 * and friends override accordingly.
2250 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
2251 .access = PL1_R, .resetvalue = cpu->midr,
2252 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
2253 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
2254 .type = ARM_CP_OVERRIDE },
2255 { .name = "MIDR_EL1", .state = ARM_CP_STATE_AA64,
2256 .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 0, .crm = 0,
2257 .access = PL1_R, .resetvalue = cpu->midr, .type = ARM_CP_CONST },
2259 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
2260 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
2261 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
2262 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
2263 .access = PL0_R, .accessfn = ctr_el0_access,
2264 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
2266 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
2267 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2269 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
2270 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2271 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
2273 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
2274 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2276 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
2277 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2279 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
2280 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2282 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
2283 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2285 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
2286 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2289 ARMCPRegInfo crn0_wi_reginfo = {
2290 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
2291 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
2292 .type = ARM_CP_NOP | ARM_CP_OVERRIDE
2294 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
2295 arm_feature(env, ARM_FEATURE_STRONGARM)) {
2297 /* Register the blanket "writes ignored" value first to cover the
2298 * whole space. Then update the specific ID registers to allow write
2299 * access, so that they ignore writes rather than causing them to
2302 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
2303 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
2307 define_arm_cp_regs(cpu, id_cp_reginfo);
2310 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
2311 define_arm_cp_regs(cpu, mpidr_cp_reginfo);
2314 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
2315 ARMCPRegInfo auxcr = {
2316 .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
2317 .access = PL1_RW, .type = ARM_CP_CONST,
2318 .resetvalue = cpu->reset_auxcr
2320 define_one_arm_cp_reg(cpu, &auxcr);
2323 if (arm_feature(env, ARM_FEATURE_CBAR)) {
2324 ARMCPRegInfo cbar = {
2325 .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
2326 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
2327 .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
2329 define_one_arm_cp_reg(cpu, &cbar);
2332 /* Generic registers whose values depend on the implementation */
2334 ARMCPRegInfo sctlr = {
2335 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
2336 .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
2337 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
2338 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
2339 .raw_writefn = raw_write,
2341 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
2342 /* Normally we would always end the TB on an SCTLR write, but Linux
2343 * arch/arm/mach-pxa/sleep.S expects two instructions following
2344 * an MMU enable to execute from cache. Imitate this behaviour.
2346 sctlr.type |= ARM_CP_SUPPRESS_TB_END;
2348 define_one_arm_cp_reg(cpu, &sctlr);
2352 ARMCPU *cpu_arm_init(const char *cpu_model)
2354 return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
2357 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
2359 CPUState *cs = CPU(cpu);
2360 CPUARMState *env = &cpu->env;
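/* The register counts passed to gdb_register_coprocessor() correspond to:
 * 34 = 32 Vn + FPSR + FPCR; 51 = 32 Dn + 16 Qn aliases + FPSID/FPSCR/FPEXC;
 * 35 = 32 Dn + FPSID/FPSCR/FPEXC; 19 = 16 Dn + FPSID/FPSCR/FPEXC.
 */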
2362 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
2363 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
2364 aarch64_fpu_gdb_set_reg,
2365 34, "aarch64-fpu.xml", 0);
2366 } else if (arm_feature(env, ARM_FEATURE_NEON)) {
2367 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
2368 51, "arm-neon.xml", 0);
2369 } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
2370 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
2371 35, "arm-vfp3.xml", 0);
2372 } else if (arm_feature(env, ARM_FEATURE_VFP)) {
2373 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
2374 19, "arm-vfp.xml", 0);
2378 /* Sort alphabetically by type name, except for "any". */
2379 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
2381 ObjectClass *class_a = (ObjectClass *)a;
2382 ObjectClass *class_b = (ObjectClass *)b;
2383 const char *name_a, *name_b;
2385 name_a = object_class_get_name(class_a);
2386 name_b = object_class_get_name(class_b);
2387 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
2389 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
2392 return strcmp(name_a, name_b);
2396 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
2398 ObjectClass *oc = data;
2399 CPUListState *s = user_data;
2400 const char *typename;
2403 typename = object_class_get_name(oc);
2404 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
2405 (*s->cpu_fprintf)(s->file, " %s\n",
2410 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2414 .cpu_fprintf = cpu_fprintf,
2418 list = object_class_get_list(TYPE_ARM_CPU, false);
2419 list = g_slist_sort(list, arm_cpu_list_compare);
2420 (*cpu_fprintf)(f, "Available CPUs:\n");
2421 g_slist_foreach(list, arm_cpu_list_entry, &s);
2424 /* The 'host' CPU type is dynamically registered only if KVM is
2425 * enabled, so we have to special-case it here:
2427 (*cpu_fprintf)(f, " host (only available in KVM mode)\n");
2431 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
2433 ObjectClass *oc = data;
2434 CpuDefinitionInfoList **cpu_list = user_data;
2435 CpuDefinitionInfoList *entry;
2436 CpuDefinitionInfo *info;
2437 const char *typename;
2439 typename = object_class_get_name(oc);
2440 info = g_malloc0(sizeof(*info));
2441 info->name = g_strndup(typename,
2442 strlen(typename) - strlen("-" TYPE_ARM_CPU));
2444 entry = g_malloc0(sizeof(*entry));
2445 entry->value = info;
2446 entry->next = *cpu_list;
2450 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2452 CpuDefinitionInfoList *cpu_list = NULL;
2455 list = object_class_get_list(TYPE_ARM_CPU, false);
2456 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
2462 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
2463 void *opaque, int state,
2464 int crm, int opc1, int opc2)
2466 /* Private utility function for define_one_arm_cp_reg_with_opaque():
2467 * add a single reginfo struct to the hash table.
2469 uint32_t *key = g_new(uint32_t, 1);
2470 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
2471 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
2472 if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
2473 /* The AArch32 view of a shared register sees the lower 32 bits
2474 * of a 64 bit backing field. It is not migratable as the AArch64
2475 * view handles that. AArch64 also handles reset.
2476 * We assume it is a cp15 register.
2479 r2->type |= ARM_CP_NO_MIGRATE;
2480 r2->resetfn = arm_cp_reset_ignore;
2481 #ifdef HOST_WORDS_BIGENDIAN
2482 if (r2->fieldoffset) {
2483 r2->fieldoffset += sizeof(uint32_t);
2487 if (state == ARM_CP_STATE_AA64) {
2488 /* To allow abbreviation of ARMCPRegInfo
2489 * definitions, we treat cp == 0 as equivalent to
2490 * the value for "standard guest-visible sysreg".
2493 r2->cp = CP_REG_ARM64_SYSREG_CP;
2495 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
2496 r2->opc0, opc1, opc2);
2498 *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
2501 r2->opaque = opaque;
2503 /* reginfo passed to helpers is correct for the actual access,
2504 * and is never ARM_CP_STATE_BOTH:
2507 /* Make sure reginfo passed to helpers for wildcarded regs
2508 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
2513 /* By convention, for wildcarded registers only the first
2514 * entry is used for migration; the others are marked as
2515 * NO_MIGRATE so we don't try to transfer the register
2516 * multiple times. Special registers (ie NOP/WFI) are
2519 if ((r->type & ARM_CP_SPECIAL) ||
2520 ((r->crm == CP_ANY) && crm != 0) ||
2521 ((r->opc1 == CP_ANY) && opc1 != 0) ||
2522 ((r->opc2 == CP_ANY) && opc2 != 0)) {
2523 r2->type |= ARM_CP_NO_MIGRATE;
2526 /* Overriding of an existing definition must be explicitly
2529 if (!(r->type & ARM_CP_OVERRIDE)) {
2530 ARMCPRegInfo *oldreg;
2531 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
2532 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
2533 fprintf(stderr, "Register redefined: cp=%d %d bit "
2534 "crn=%d crm=%d opc1=%d opc2=%d, "
2535 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
2536 r2->crn, r2->crm, r2->opc1, r2->opc2,
2537 oldreg->name, r2->name);
2538 g_assert_not_reached();
2541 g_hash_table_insert(cpu->cp_regs, key, r2);
2545 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
2546 const ARMCPRegInfo *r, void *opaque)
2548 /* Define implementations of coprocessor registers.
2549 * We store these in a hashtable because typically
2550 * there are fewer than 150 registers in a space which
2551 * is 16*16*16*8*8 = 262144 in size.
2552 * Wildcarding is supported for the crm, opc1 and opc2 fields.
2553 * If a register is defined twice then the second definition is
2554 * used, so this can be used to define some generic registers and
2555 * then override them with implementation specific variations.
2556 * At least one of the original and the second definition should
2557 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
2558 * against accidental use.
2560 * The state field defines whether the register is to be
2561 * visible in the AArch32 or AArch64 execution state. If the
2562 * state is set to ARM_CP_STATE_BOTH then we synthesise a
2563 * reginfo structure for the AArch32 view, which sees the lower
2564 * 32 bits of the 64 bit register.
2566 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
2567 * be wildcarded. AArch64 registers are always considered to be 64
2568 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
2569 * the register, if any.
2571 int crm, opc1, opc2, state;
2572 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
2573 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
2574 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
2575 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
2576 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
2577 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
2578 /* 64 bit registers have only CRm and Opc1 fields */
2579 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
2580 /* op0 only exists in the AArch64 encodings */
2581 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
2582 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
2583 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
2584 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
2585 * encodes a minimum access level for the register. We roll this
2586 * runtime check into our general permission check code, so check
2587 * here that the reginfo's specified permissions are strict enough
2588 * to encompass the generic architectural permission check.
2590 if (r->state != ARM_CP_STATE_AA32) {
2593 case 0: case 1: case 2:
2606 /* unallocated encoding, so not possible */
2614 /* min_EL EL1, secure mode only (we don't check the latter) */
2618 /* broken reginfo with out-of-range opc1 */
2622 /* assert our permissions are not too lax (stricter is fine) */
2623 assert((r->access & ~mask) == 0);
2626 /* Check that the register definition has enough info to handle
2627 * reads and writes if they are permitted.
2629 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
2630 if (r->access & PL3_R) {
2631 assert(r->fieldoffset || r->readfn);
2633 if (r->access & PL3_W) {
2634 assert(r->fieldoffset || r->writefn);
2637 /* Bad type field probably means missing sentinel at end of reg list */
2638 assert(cptype_valid(r->type));
2639 for (crm = crmmin; crm <= crmmax; crm++) {
2640 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
2641 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
2642 for (state = ARM_CP_STATE_AA32;
2643 state <= ARM_CP_STATE_AA64; state++) {
2644 if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
2647 add_cpreg_to_hashtable(cpu, r, opaque, state,
2655 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
2656 const ARMCPRegInfo *regs, void *opaque)
2658 /* Define a whole list of registers */
2659 const ARMCPRegInfo *r;
2660 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
2661 define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
2665 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
2667 return g_hash_table_lookup(cpregs, &encoded_cp);
2670 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
2673 /* Helper coprocessor write function for write-ignore registers */
2676 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
2678 /* Helper coprocessor read function for read-as-zero registers */
2682 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
2684 /* Helper coprocessor reset function for do-nothing-on-reset registers */
2687 static int bad_mode_switch(CPUARMState *env, int mode)
2689 /* Return true if it is not valid for us to switch to
2690 * this CPU mode (ie all the UNPREDICTABLE cases in
2691 * the ARM ARM CPSRWriteByInstr pseudocode).
2694 case ARM_CPU_MODE_USR:
2695 case ARM_CPU_MODE_SYS:
2696 case ARM_CPU_MODE_SVC:
2697 case ARM_CPU_MODE_ABT:
2698 case ARM_CPU_MODE_UND:
2699 case ARM_CPU_MODE_IRQ:
2700 case ARM_CPU_MODE_FIQ:
2707 uint32_t cpsr_read(CPUARMState *env)
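/* The flags are kept in split form: NF and VF hold the flag in bit 31,
 * CF holds it directly, and ZF is zero exactly when Z is set; reassemble
 * them into the architectural CPSR layout here.
 */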
2710 ZF = (env->ZF == 0);
2711 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
2712 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
2713 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
2714 | ((env->condexec_bits & 0xfc) << 8)
2715 | (env->GE << 16) | (env->daif & CPSR_AIF);
2718 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
2720 if (mask & CPSR_NZCV) {
2721 env->ZF = (~val) & CPSR_Z;
2723 env->CF = (val >> 29) & 1;
2724 env->VF = (val << 3) & 0x80000000;
2727 env->QF = ((val & CPSR_Q) != 0);
2729 env->thumb = ((val & CPSR_T) != 0);
2730 if (mask & CPSR_IT_0_1) {
2731 env->condexec_bits &= ~3;
2732 env->condexec_bits |= (val >> 25) & 3;
2734 if (mask & CPSR_IT_2_7) {
2735 env->condexec_bits &= 3;
2736 env->condexec_bits |= (val >> 8) & 0xfc;
2738 if (mask & CPSR_GE) {
2739 env->GE = (val >> 16) & 0xf;
2742 env->daif &= ~(CPSR_AIF & mask);
2743 env->daif |= val & CPSR_AIF & mask;
2745 if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
2746 if (bad_mode_switch(env, val & CPSR_M)) {
2747 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
2748 * We choose to ignore the attempt and leave the CPSR M field
2753 switch_mode(env, val & CPSR_M);
2756 mask &= ~CACHED_CPSR_BITS;
2757 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
2760 /* Sign/zero extend */
2761 uint32_t HELPER(sxtb16)(uint32_t x)
2764 res = (uint16_t)(int8_t)x;
2765 res |= (uint32_t)(int8_t)(x >> 16) << 16;
2769 uint32_t HELPER(uxtb16)(uint32_t x)
2772 res = (uint16_t)(uint8_t)x;
2773 res |= (uint32_t)(uint8_t)(x >> 16) << 16;
2777 uint32_t HELPER(clz)(uint32_t x)
2782 int32_t HELPER(sdiv)(int32_t num, int32_t den)
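/* The architecture defines INT_MIN / -1 to return INT_MIN (no trap),
 * so guard that case explicitly to avoid C undefined behaviour.
 */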
2786 if (num == INT_MIN && den == -1)
2791 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
2798 uint32_t HELPER(rbit)(uint32_t x)
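/* Reverse the bit order: swap the bytes, then the nibbles within each
 * byte, then the bits within each nibble.
 */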
2800 x = ((x & 0xff000000) >> 24)
2801 | ((x & 0x00ff0000) >> 8)
2802 | ((x & 0x0000ff00) << 8)
2803 | ((x & 0x000000ff) << 24);
2804 x = ((x & 0xf0f0f0f0) >> 4)
2805 | ((x & 0x0f0f0f0f) << 4);
2806 x = ((x & 0x88888888) >> 3)
2807 | ((x & 0x44444444) >> 1)
2808 | ((x & 0x22222222) << 1)
2809 | ((x & 0x11111111) << 3);
2813 #if defined(CONFIG_USER_ONLY)
2815 void arm_cpu_do_interrupt(CPUState *cs)
2817 cs->exception_index = -1;
2820 int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
2823 ARMCPU *cpu = ARM_CPU(cs);
2824 CPUARMState *env = &cpu->env;
2826 env->exception.vaddress = address;
2828 cs->exception_index = EXCP_PREFETCH_ABORT;
2830 cs->exception_index = EXCP_DATA_ABORT;
2835 /* These should probably raise undefined insn exceptions. */
2836 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
2838 ARMCPU *cpu = arm_env_get_cpu(env);
2840 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
2843 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2845 ARMCPU *cpu = arm_env_get_cpu(env);
2847 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
2851 void switch_mode(CPUARMState *env, int mode)
2853 ARMCPU *cpu = arm_env_get_cpu(env);
2855 if (mode != ARM_CPU_MODE_USR) {
2856 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
2860 void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
2862 ARMCPU *cpu = arm_env_get_cpu(env);
2864 cpu_abort(CPU(cpu), "banked r13 write\n");
2867 uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
2869 ARMCPU *cpu = arm_env_get_cpu(env);
2871 cpu_abort(CPU(cpu), "banked r13 read\n");
2877 /* Map CPU modes onto saved register banks. */
2878 int bank_number(int mode)
2881 case ARM_CPU_MODE_USR:
2882 case ARM_CPU_MODE_SYS:
2884 case ARM_CPU_MODE_SVC:
2886 case ARM_CPU_MODE_ABT:
2888 case ARM_CPU_MODE_UND:
2890 case ARM_CPU_MODE_IRQ:
2892 case ARM_CPU_MODE_FIQ:
2895 hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
2898 void switch_mode(CPUARMState *env, int mode)
2903 old_mode = env->uncached_cpsr & CPSR_M;
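/* FIQ mode banks r8-r12 in addition to r13/r14, so switching into or out
 * of FIQ also has to swap r8-r12 with the usr_regs/fiq_regs copies; r13,
 * r14 and SPSR are handled via the banked_* arrays below.
 */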
2904 if (mode == old_mode)
2907 if (old_mode == ARM_CPU_MODE_FIQ) {
2908 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
2909 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
2910 } else if (mode == ARM_CPU_MODE_FIQ) {
2911 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
2912 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
2915 i = bank_number(old_mode);
2916 env->banked_r13[i] = env->regs[13];
2917 env->banked_r14[i] = env->regs[14];
2918 env->banked_spsr[i] = env->spsr;
2920 i = bank_number(mode);
2921 env->regs[13] = env->banked_r13[i];
2922 env->regs[14] = env->banked_r14[i];
2923 env->spsr = env->banked_spsr[i];
2926 static void v7m_push(CPUARMState *env, uint32_t val)
2928 CPUState *cs = CPU(arm_env_get_cpu(env));
2931 stl_phys(cs->as, env->regs[13], val);
2934 static uint32_t v7m_pop(CPUARMState *env)
2936 CPUState *cs = CPU(arm_env_get_cpu(env));
2939 val = ldl_phys(cs->as, env->regs[13]);
2944 /* Switch to V7M main or process stack pointer. */
2945 static void switch_v7m_sp(CPUARMState *env, int process)
2948 if (env->v7m.current_sp != process) {
2949 tmp = env->v7m.other_sp;
2950 env->v7m.other_sp = env->regs[13];
2951 env->regs[13] = tmp;
2952 env->v7m.current_sp = process;
2956 static void do_v7m_exception_exit(CPUARMState *env)
2961 type = env->regs[15];
2962 if (env->v7m.exception != 0)
2963 armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
2965 /* Switch to the target stack. */
2966 switch_v7m_sp(env, (type & 4) != 0);
2967 /* Pop registers. */
2968 env->regs[0] = v7m_pop(env);
2969 env->regs[1] = v7m_pop(env);
2970 env->regs[2] = v7m_pop(env);
2971 env->regs[3] = v7m_pop(env);
2972 env->regs[12] = v7m_pop(env);
2973 env->regs[14] = v7m_pop(env);
2974 env->regs[15] = v7m_pop(env);
2975 xpsr = v7m_pop(env);
2976 xpsr_write(env, xpsr, 0xfffffdff);
2977 /* Undo stack alignment. */
2980 /* ??? The exception return type specifies Thread/Handler mode. However
2981 this is also implied by the xPSR value. Not sure what to do
2982 if there is a mismatch. */
2983 /* ??? Likewise for mismatches between the CONTROL register and the stack
2987 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2989 ARMCPU *cpu = ARM_CPU(cs);
2990 CPUARMState *env = &cpu->env;
2991 uint32_t xpsr = xpsr_read(env);
2995 arm_log_exception(cs->exception_index);
2998 if (env->v7m.current_sp)
3000 if (env->v7m.exception == 0)
3003 /* For exceptions we just mark as pending on the NVIC, and let that
3005 /* TODO: Need to escalate if the current priority is higher than the
3006 one we're raising. */
3007 switch (cs->exception_index) {
3009 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
3012 /* The PC already points to the next instruction. */
3013 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
3015 case EXCP_PREFETCH_ABORT:
3016 case EXCP_DATA_ABORT:
3017 /* TODO: if we implemented the MPU registers, this is where we
3018 * should set the MMFAR, etc from exception.fsr and exception.vaddress.
3020 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
3023 if (semihosting_enabled) {
3025 nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
3028 env->regs[0] = do_arm_semihosting(env);
3029 qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
3033 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
3036 env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
3038 case EXCP_EXCEPTION_EXIT:
3039 do_v7m_exception_exit(env);
3042 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
3043 return; /* Never happens. Keep compiler happy. */
3046 /* Align stack pointer. */
3047 /* ??? Should only do this if Configuration Control Register
3048 STACKALIGN bit is set. */
3049 if (env->regs[13] & 4) {
3053 /* Switch to the handler mode. */
3054 v7m_push(env, xpsr);
3055 v7m_push(env, env->regs[15]);
3056 v7m_push(env, env->regs[14]);
3057 v7m_push(env, env->regs[12]);
3058 v7m_push(env, env->regs[3]);
3059 v7m_push(env, env->regs[2]);
3060 v7m_push(env, env->regs[1]);
3061 v7m_push(env, env->regs[0]);
3062 switch_v7m_sp(env, 0);
3064 env->condexec_bits = 0;
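/* The vector table entry is a Thumb interworking address: bit 0 (expected
 * to be set on v7-M) becomes the Thumb bit and is cleared to form the PC.
 */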
3066 addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
3067 env->regs[15] = addr & 0xfffffffe;
3068 env->thumb = addr & 1;
3071 /* Handle a CPU exception. */
3072 void arm_cpu_do_interrupt(CPUState *cs)
3074 ARMCPU *cpu = ARM_CPU(cs);
3075 CPUARMState *env = &cpu->env;
3083 arm_log_exception(cs->exception_index);
3085 /* TODO: Vectored interrupt controller. */
3086 switch (cs->exception_index) {
3088 new_mode = ARM_CPU_MODE_UND;
3097 if (semihosting_enabled) {
3098 /* Check for semihosting interrupt. */
3100 mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
3103 mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
3106 /* Only intercept calls from privileged modes, to provide some
3107 semblance of security. */
3108 if (((mask == 0x123456 && !env->thumb)
3109 || (mask == 0xab && env->thumb))
3110 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
3111 env->regs[0] = do_arm_semihosting(env);
3112 qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
3116 new_mode = ARM_CPU_MODE_SVC;
3119 /* The PC already points to the next instruction. */
3123 /* See if this is a semihosting syscall. */
3124 if (env->thumb && semihosting_enabled) {
3125 mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
3127 && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
3129 env->regs[0] = do_arm_semihosting(env);
3130 qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
3134 env->exception.fsr = 2;
3135 /* Fall through to prefetch abort. */
3136 case EXCP_PREFETCH_ABORT:
3137 env->cp15.ifsr_el2 = env->exception.fsr;
3138 env->cp15.far_el1 = deposit64(env->cp15.far_el1, 32, 32,
3139 env->exception.vaddress);
3140 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
3141 env->cp15.ifsr_el2, (uint32_t)env->exception.vaddress);
3142 new_mode = ARM_CPU_MODE_ABT;
3144 mask = CPSR_A | CPSR_I;
3147 case EXCP_DATA_ABORT:
3148 env->cp15.esr_el1 = env->exception.fsr;
3149 env->cp15.far_el1 = deposit64(env->cp15.far_el1, 0, 32,
3150 env->exception.vaddress);
3151 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
3152 (uint32_t)env->cp15.esr_el1,
3153 (uint32_t)env->exception.vaddress);
3154 new_mode = ARM_CPU_MODE_ABT;
3156 mask = CPSR_A | CPSR_I;
3160 new_mode = ARM_CPU_MODE_IRQ;
3162 /* Disable IRQ and imprecise data aborts. */
3163 mask = CPSR_A | CPSR_I;
3167 new_mode = ARM_CPU_MODE_FIQ;
3169 /* Disable FIQ, IRQ and imprecise data aborts. */
3170 mask = CPSR_A | CPSR_I | CPSR_F;
3174 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
3175 return; /* Never happens. Keep compiler happy. */
3178 if (env->cp15.c1_sys & SCTLR_V) {
3179 /* when enabled, base address cannot be remapped. */
3182 /* ARM v7 architectures provide a vector base address register to remap
3183 * the interrupt vector table.
3184 * This register is only honoured in non-monitor mode, and has secure
3185 * and non-secure copies. Since the CPU always operates in non-secure
3186 * state and is never in monitor mode, this feature is always active.
3187 * Note: only bits 31:5 are valid.
3189 addr += env->cp15.c12_vbar;
3191 switch_mode (env, new_mode);
3192 env->spsr = cpsr_read(env);
3193 /* Clear IT bits. */
3194 env->condexec_bits = 0;
3195 /* Switch to the new mode, and to the correct instruction set. */
3196 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
3198 /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
3199 * and we should just guard the thumb mode on V4 */
3200 if (arm_feature(env, ARM_FEATURE_V4T)) {
3201 env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
3203 env->regs[14] = env->regs[15] + offset;
3204 env->regs[15] = addr;
3205 cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
3208 /* Check section/page access permissions.
3209 Returns the page protection flags, or zero if the access is not
3211 static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
3212 int access_type, int is_user)
3216 if (domain_prot == 3) {
3217 return PAGE_READ | PAGE_WRITE;
3220 if (access_type == 1)
3223 prot_ro = PAGE_READ;
3227 if (arm_feature(env, ARM_FEATURE_V7)) {
3230 if (access_type == 1)
3232 switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
3234 return is_user ? 0 : PAGE_READ;
3241 return is_user ? 0 : PAGE_READ | PAGE_WRITE;
3246 return PAGE_READ | PAGE_WRITE;
3248 return PAGE_READ | PAGE_WRITE;
3249 case 4: /* Reserved. */
3252 return is_user ? 0 : prot_ro;
3256 if (!arm_feature (env, ARM_FEATURE_V6K))
3264 static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
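/* TTBCR.N (reflected in c2_mask and c2_base_mask) splits the VA space:
 * addresses whose top N bits are non-zero are translated via TTBR1, the
 * rest via TTBR0.
 */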
3268 if (address & env->cp15.c2_mask)
3269 table = env->cp15.ttbr1_el1 & 0xffffc000;
3271 table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;
3273 table |= (address >> 18) & 0x3ffc;
3277 static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
3278 int is_user, hwaddr *phys_ptr,
3279 int *prot, target_ulong *page_size)
3281 CPUState *cs = CPU(arm_env_get_cpu(env));
3291 /* Pagetable walk. */
3292 /* Lookup l1 descriptor. */
3293 table = get_level1_table_address(env, address);
3294 desc = ldl_phys(cs->as, table);
3296 domain = (desc >> 5) & 0x0f;
3297 domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
3299 /* Section translation fault. */
3303 if (domain_prot == 0 || domain_prot == 2) {
3305 code = 9; /* Section domain fault. */
3307 code = 11; /* Page domain fault. */
3312 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
3313 ap = (desc >> 10) & 3;
3315 *page_size = 1024 * 1024;
3317 /* Lookup l2 entry. */
3319 /* Coarse pagetable. */
3320 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
3322 /* Fine pagetable. */
3323 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
3325 desc = ldl_phys(cs->as, table);
3327 case 0: /* Page translation fault. */
3330 case 1: /* 64k page. */
3331 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
3332 ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
3333 *page_size = 0x10000;
3335 case 2: /* 4k page. */
3336 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
3337 ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
3338 *page_size = 0x1000;
3340 case 3: /* 1k page. */
3342 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
3343 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
3345 /* Page translation fault. */
3350 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
3352 ap = (desc >> 4) & 3;
3356 /* Never happens, but compiler isn't smart enough to tell. */
3361 *prot = check_ap(env, ap, domain_prot, access_type, is_user);
3363 /* Access permission fault. */
3367 *phys_ptr = phys_addr;
3370 return code | (domain << 4);
3373 static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
3374 int is_user, hwaddr *phys_ptr,
3375 int *prot, target_ulong *page_size)
3377 CPUState *cs = CPU(arm_env_get_cpu(env));
3389 /* Pagetable walk. */
3390 /* Lookup l1 descriptor. */
3391 table = get_level1_table_address(env, address);
3392 desc = ldl_phys(cs->as, table);
3394 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
3395 /* Section translation fault, or attempt to use the encoding
3396 * which is Reserved on implementations without PXN.
3401 if ((type == 1) || !(desc & (1 << 18))) {
3402 /* Page or Section. */
3403 domain = (desc >> 5) & 0x0f;
3405 domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
3406 if (domain_prot == 0 || domain_prot == 2) {
3408 code = 9; /* Section domain fault. */
3410 code = 11; /* Page domain fault. */
3415 if (desc & (1 << 18)) {
3417 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
3418 *page_size = 0x1000000;
3421 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
3422 *page_size = 0x100000;
3424 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
3425 xn = desc & (1 << 4);
3429 if (arm_feature(env, ARM_FEATURE_PXN)) {
3430 pxn = (desc >> 2) & 1;
3432 /* Lookup l2 entry. */
3433 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
3434 desc = ldl_phys(cs->as, table);
3435 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
3437 case 0: /* Page translation fault. */
3440 case 1: /* 64k page. */
3441 phys_addr = (desc & 0xffff0000) | (address & 0xffff);
3442 xn = desc & (1 << 15);
3443 *page_size = 0x10000;
3445 case 2: case 3: /* 4k page. */
3446 phys_addr = (desc & 0xfffff000) | (address & 0xfff);
3448 *page_size = 0x1000;
3451 /* Never happens, but compiler isn't smart enough to tell. */
3456 if (domain_prot == 3) {
3457 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
3459 if (pxn && !is_user) {
3462 if (xn && access_type == 2)
3465 /* The simplified model uses AP[0] as an access control bit. */
3466 if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
3467 /* Access flag fault. */
3468 code = (code == 15) ? 6 : 3;
3471 *prot = check_ap(env, ap, domain_prot, access_type, is_user);
3473 /* Access permission fault. */
3480 *phys_ptr = phys_addr;
3483 return code | (domain << 4);
3486 /* Fault type for long-descriptor MMU fault reporting; this corresponds
3487 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
3490 translation_fault = 1,
3492 permission_fault = 3,
3495 static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
3496 int access_type, int is_user,
3497 hwaddr *phys_ptr, int *prot,
3498 target_ulong *page_size_ptr)
3500 CPUState *cs = CPU(arm_env_get_cpu(env));
3501 /* Read an LPAE long-descriptor translation table. */
3502 MMUFaultType fault_type = translation_fault;
3509 hwaddr descaddr, descmask;
3510 uint32_t tableattrs;
3511 target_ulong page_size;
3513 int32_t granule_sz = 9;
3514 int32_t va_size = 32;
3517 if (arm_el_is_aa64(env, 1)) {
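/* TCR_EL1.TBI1 (bit 38) and TBI0 (bit 37) enable Top Byte Ignore for the
 * TTBR1 region (address bit 55 set) and the TTBR0 region respectively.
 */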
3519 if (extract64(address, 55, 1))
3520 tbi = extract64(env->cp15.c2_control, 38, 1);
3522 tbi = extract64(env->cp15.c2_control, 37, 1);
3526 /* Determine whether this address is in the region controlled by
3527 * TTBR0 or TTBR1 (or if it is in neither region and should fault).
3528 * This is a Non-secure PL0/1 stage 1 translation, so controlled by
3529 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
3531 uint32_t t0sz = extract32(env->cp15.c2_control, 0, 6);
3532 if (arm_el_is_aa64(env, 1)) {
3533 t0sz = MIN(t0sz, 39);
3534 t0sz = MAX(t0sz, 16);
3536 uint32_t t1sz = extract32(env->cp15.c2_control, 16, 6);
3537 if (arm_el_is_aa64(env, 1)) {
3538 t1sz = MIN(t1sz, 39);
3539 t1sz = MAX(t1sz, 16);
3541 if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
3542 /* there is a ttbr0 region and we are in it (high bits all zero) */
3544 } else if (t1sz && !extract64(~address, va_size - t1sz, t1sz - tbi)) {
3545 /* there is a ttbr1 region and we are in it (high bits all one) */
3548 /* ttbr0 region is "everything not in the ttbr1 region" */
3551 /* ttbr1 region is "everything not in the ttbr0 region" */
3554 /* in the gap between the two regions, this is a Translation fault */
3555 fault_type = translation_fault;
3559 /* Note that QEMU ignores shareability and cacheability attributes,
3560 * so we don't need to do anything with the SH, ORGN, IRGN fields
3561 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
3562 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
3563 * implement any ASID-like capability so we can ignore it (instead
3564 * we will always flush the TLB any time the ASID is changed).
3566 if (ttbr_select == 0) {
3567 ttbr = env->cp15.ttbr0_el1;
3568 epd = extract32(env->cp15.c2_control, 7, 1);
3571 tg = extract32(env->cp15.c2_control, 14, 2);
3572 if (tg == 1) { /* 64KB pages */
3575 if (tg == 2) { /* 16KB pages */
3579 ttbr = env->cp15.ttbr1_el1;
3580 epd = extract32(env->cp15.c2_control, 23, 1);
3583 tg = extract32(env->cp15.c2_control, 30, 2);
3584 if (tg == 3) { /* 64KB pages */
3587 if (tg == 1) { /* 16KB pages */
3593 /* Translation table walk disabled => Translation fault on TLB miss */
3597 /* The starting level depends on the virtual address size which can be
3598 * up to 48-bits and the translation granule size.
3600 if ((va_size - tsz) > (granule_sz * 4 + 3)) {
3602 } else if ((va_size - tsz) > (granule_sz * 3 + 3)) {
3608 /* Clear the vaddr bits which aren't part of the within-region address,
3609 * so that we don't have to special case things when calculating the
3610 * first descriptor address.
3613 address &= (1ULL << (va_size - tsz)) - 1;
3616 descmask = (1ULL << (granule_sz + 3)) - 1;
3618 /* Now we can extract the actual base address from the TTBR */
3619 descaddr = extract64(ttbr, 0, 48);
3620 descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);
3624 uint64_t descriptor;
3626 descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
3628 descriptor = ldq_phys(cs->as, descaddr);
3629 if (!(descriptor & 1) ||
3630 (!(descriptor & 2) && (level == 3))) {
3631 /* Invalid, or the Reserved level 3 encoding */
3634 descaddr = descriptor & 0xfffffff000ULL;
3636 if ((descriptor & 2) && (level < 3)) {
3637 /* Table entry. The top five bits are attributes which may
3638 * propagate down through lower levels of the table (and
3639 * which are all arranged so that 0 means "no effect", so
3640 * we can gather them up by ORing in the bits at each level).
3642 tableattrs |= extract64(descriptor, 59, 5);
3646 /* Block entry at level 1 or 2, or page entry at level 3.
3647 * These are basically the same thing, although the number
3648 * of bits we pull in from the vaddr varies.
3650 page_size = (1 << ((granule_sz * (4 - level)) + 3));
3651 descaddr |= (address & (page_size - 1));
3652 /* Extract attributes from the descriptor and merge with table attrs */
3653 if (arm_feature(env, ARM_FEATURE_V8)) {
3654 attrs = extract64(descriptor, 2, 10)
3655 | (extract64(descriptor, 53, 11) << 10);
3657 attrs = extract64(descriptor, 2, 10)
3658 | (extract64(descriptor, 52, 12) << 10);
3660 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
3661 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
3662 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
3663 * means "force PL1 access only", which means forcing AP[1] to 0.
3665 if (extract32(tableattrs, 2, 1)) {
3668 /* Since we're always in the Non-secure state, NSTable is ignored. */
3671 /* Here descaddr is the final physical address, and attributes
3674 fault_type = access_fault;
3675 if ((attrs & (1 << 8)) == 0) {
3679 fault_type = permission_fault;
3680 if (is_user && !(attrs & (1 << 4))) {
3681 /* Unprivileged access not enabled */
3684 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
3685 if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
3687 if (access_type == 2) {
3690 *prot &= ~PAGE_EXEC;
3692 if (attrs & (1 << 5)) {
3693 /* Write access forbidden */
3694 if (access_type == 1) {
3697 *prot &= ~PAGE_WRITE;
3700 *phys_ptr = descaddr;
3701 *page_size_ptr = page_size;
3705 /* Long-descriptor format IFSR/DFSR value */
3706 return (1 << 9) | (fault_type << 2) | level;
3709 static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
3710 int access_type, int is_user,
3711 hwaddr *phys_ptr, int *prot)
3717 *phys_ptr = address;
3718 for (n = 7; n >= 0; n--) {
3719 base = env->cp15.c6_region[n];
3720 if ((base & 1) == 0)
3722 mask = 1 << ((base >> 1) & 0x1f);
3723 /* Keep this shift separate from the above to avoid an
3724 (undefined) << 32. */
3725 mask = (mask << 1) - 1;
3726 if (((base ^ address) & ~mask) == 0)
3732 if (access_type == 2) {
3733 mask = env->cp15.pmsav5_insn_ap;
3735 mask = env->cp15.pmsav5_data_ap;
3737 mask = (mask >> (n * 4)) & 0xf;
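/* Each of the 8 PMSAv5 regions has a 4-bit access permission field packed
 * into the instruction/data AP registers.
 */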
3744 *prot = PAGE_READ | PAGE_WRITE;
3749 *prot |= PAGE_WRITE;
3752 *prot = PAGE_READ | PAGE_WRITE;
3763 /* Bad permission. */
3770 /* get_phys_addr - get the physical address for this virtual address
3772 * Find the physical address corresponding to the given virtual address,
3773 * by doing a translation table walk on MMU based systems or using the
3774 * MPU state on MPU based systems.
3776 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
3777 * prot and page_size are not filled in, and the return value provides
3778 * information on why the translation aborted, in the format of a
3779 * DFSR/IFSR fault register, with the following caveats:
3780 * * we honour the short vs long DFSR format differences.
3781 * * the WnR bit is never set (the caller must do this).
3782 * * for MPU based systems we don't bother to return a full FSR format
3786 * @address: virtual address to get physical address for
3787 * @access_type: 0 for read, 1 for write, 2 for execute
3788 * @is_user: 0 for privileged access, 1 for user
3789 * @phys_ptr: set to the physical address corresponding to the virtual address
3790 * @prot: set to the permissions for the page containing phys_ptr
3791 * @page_size: set to the size of the page containing phys_ptr
3793 static inline int get_phys_addr(CPUARMState *env, target_ulong address,
3794 int access_type, int is_user,
3795 hwaddr *phys_ptr, int *prot,
3796 target_ulong *page_size)
3798 /* Fast Context Switch Extension. */
3799 if (address < 0x02000000)
3800 address += env->cp15.c13_fcse;
3802 if ((env->cp15.c1_sys & SCTLR_M) == 0) {
3803 /* MMU/MPU disabled. */
3804 *phys_ptr = address;
3805 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
3806 *page_size = TARGET_PAGE_SIZE;
3808 } else if (arm_feature(env, ARM_FEATURE_MPU)) {
3809 *page_size = TARGET_PAGE_SIZE;
3810 return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
3812 } else if (extended_addresses_enabled(env)) {
3813 return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
3815 } else if (env->cp15.c1_sys & SCTLR_XP) {
3816 return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
3819 return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
3824 int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
3825 int access_type, int mmu_idx)
3827 ARMCPU *cpu = ARM_CPU(cs);
3828 CPUARMState *env = &cpu->env;
3830 target_ulong page_size;
3834 bool same_el = (arm_current_pl(env) != 0);
3836 is_user = mmu_idx == MMU_USER_IDX;
3837 ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
3840 /* Map a single [sub]page. */
3841 phys_addr &= ~(hwaddr)0x3ff;
3842 address &= ~(target_ulong)0x3ff;
3843 tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
3847 /* AArch64 syndrome does not have an LPAE bit */
3848 syn = ret & ~(1 << 9);
3850 /* For insn and data aborts we assume there is no instruction syndrome
3851 * information; this is always true for exceptions reported to EL1.
3853 if (access_type == 2) {
3854 syn = syn_insn_abort(same_el, 0, 0, syn);
3855 cs->exception_index = EXCP_PREFETCH_ABORT;
3857 syn = syn_data_abort(same_el, 0, 0, 0, access_type == 1, syn);
3858 if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6)) {
3861 cs->exception_index = EXCP_DATA_ABORT;
3864 env->exception.syndrome = syn;
3865 env->exception.vaddress = address;
3866 env->exception.fsr = ret;
3870 hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
3872 ARMCPU *cpu = ARM_CPU(cs);
3874 target_ulong page_size;
3878 ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);
3887 void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
3889 if ((env->uncached_cpsr & CPSR_M) == mode) {
3890 env->regs[13] = val;
3892 env->banked_r13[bank_number(mode)] = val;
3896 uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
3898 if ((env->uncached_cpsr & CPSR_M) == mode) {
3899 return env->regs[13];
3901 return env->banked_r13[bank_number(mode)];
3905 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
3907 ARMCPU *cpu = arm_env_get_cpu(env);
3911 return xpsr_read(env) & 0xf8000000;
3913 return xpsr_read(env) & 0xf80001ff;
3915 return xpsr_read(env) & 0xff00fc00;
3917 return xpsr_read(env) & 0xff00fdff;
3919 return xpsr_read(env) & 0x000001ff;
3921 return xpsr_read(env) & 0x0700fc00;
3923 return xpsr_read(env) & 0x0700edff;
3925 return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
3927 return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
3928 case 16: /* PRIMASK */
3929 return (env->daif & PSTATE_I) != 0;
3930 case 17: /* BASEPRI */
3931 case 18: /* BASEPRI_MAX */
3932 return env->v7m.basepri;
3933 case 19: /* FAULTMASK */
3934 return (env->daif & PSTATE_F) != 0;
3935 case 20: /* CONTROL */
3936 return env->v7m.control;
3938 /* ??? For debugging only. */
3939 cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
3944 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
3946 ARMCPU *cpu = arm_env_get_cpu(env);
3950 xpsr_write(env, val, 0xf8000000);
3953 xpsr_write(env, val, 0xf8000000);
3956 xpsr_write(env, val, 0xfe00fc00);
3959 xpsr_write(env, val, 0xfe00fc00);
3962 /* IPSR bits are readonly. */
3965 xpsr_write(env, val, 0x0600fc00);
3968 xpsr_write(env, val, 0x0600fc00);
3971 if (env->v7m.current_sp)
3972 env->v7m.other_sp = val;
3974 env->regs[13] = val;
3977 if (env->v7m.current_sp)
3978 env->regs[13] = val;
3980 env->v7m.other_sp = val;
3982 case 16: /* PRIMASK */
3984 env->daif |= PSTATE_I;
3986 env->daif &= ~PSTATE_I;
3989 case 17: /* BASEPRI */
3990 env->v7m.basepri = val & 0xff;
3992 case 18: /* BASEPRI_MAX */
3994 if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
3995 env->v7m.basepri = val;
3997 case 19: /* FAULTMASK */
3999 env->daif |= PSTATE_F;
4001 env->daif &= ~PSTATE_F;
4004 case 20: /* CONTROL */
4005 env->v7m.control = val & 3;
4006 switch_v7m_sp(env, (val & 2) != 0);
4009 /* ??? For debugging only. */
4010 cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
4017 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
4019 /* Implement DC ZVA, which zeroes a fixed-length block of memory.
4020 * Note that we do not implement the (architecturally mandated)
4021 * alignment fault for attempts to use this on Device memory
4022 * (which matches the usual QEMU behaviour of not implementing either
4023 * alignment faults or any memory attribute handling).
4026 ARMCPU *cpu = arm_env_get_cpu(env);
4027 uint64_t blocklen = 4 << cpu->dcz_blocksize;
4028 uint64_t vaddr = vaddr_in & ~(blocklen - 1);
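/* e.g. a dcz_blocksize of 4 gives a 64-byte block, so an input address of
 * 0x1234 is rounded down to 0x1200 before zeroing.
 */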
4030 #ifndef CONFIG_USER_ONLY
4032 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
4033 * the block size so we might have to do more than one TLB lookup.
4034 * We know that in fact for any v8 CPU the page size is at least 4K
4035 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
4036 * 1K as an artefact of legacy v5 subpage support being present in the
4037 * same QEMU executable.
4039 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
4040 void *hostaddr[maxidx];
4043 for (try = 0; try < 2; try++) {
4045 for (i = 0; i < maxidx; i++) {
4046 hostaddr[i] = tlb_vaddr_to_host(env,
4047 vaddr + TARGET_PAGE_SIZE * i,
4048 1, cpu_mmu_index(env));
4054 /* If it's all in the TLB it's fair game for just writing to;
4055 * we know we don't need to update dirty status, etc.
4057 for (i = 0; i < maxidx - 1; i++) {
4058 memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
4060 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
4063 /* OK, try a store and see if we can populate the tlb. This
4064 * might cause an exception if the memory isn't writable,
4065 * in which case we will longjmp out of here. We must for
4066 * this purpose use the actual register value passed to us
4067 * so that we get the fault address right.
4069 helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
4070 /* Now we can populate the other TLB entries, if any */
4071 for (i = 0; i < maxidx; i++) {
4072 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
4073 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
4074 helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
4079 /* Slow path (probably attempt to do this to an I/O device or
4080 * similar, or clearing of a block of code we have translations
4081 * cached for). Just do a series of byte writes as the architecture
4082 * demands. It's not worth trying to use a cpu_physical_memory_map(),
4083 * memset(), unmap() sequence here because:
4084 * + we'd need to account for the blocksize being larger than a page
4085 * + the direct-RAM access case is almost always going to be dealt
4086 * with in the fastpath code above, so there's no speed benefit
4087 * + we would have to deal with the map returning NULL because the
4088 * bounce buffer was in use
4090 for (i = 0; i < blocklen; i++) {
4091 helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
4095 memset(g2h(vaddr), 0, blocklen);
4099 /* Note that signed overflow is undefined in C. The following routines are
4100 careful to use unsigned types where modulo arithmetic is required.
4101 Failure to do so _will_ break on newer gcc. */
4103 /* Signed saturating arithmetic. */
4105 /* Perform 16-bit signed saturating addition. */
4106 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
4111 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
4120 /* Perform 8-bit signed saturating addition. */
4121 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
4126 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
4135 /* Perform 16-bit signed saturating subtraction. */
4136 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
4141 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
4150 /* Perform 8-bit signed saturating subtraction. */
4151 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
4156 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
4165 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
4166 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
4167 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
4168 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
4171 #include "op_addsub.h"
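/* Worked example of the saturation test above: add16_sat(0x7fff, 0x0001)
 * wraps to 0x8000, so the sign of the result differs from the sign of 'a'
 * while the operands had equal signs, and the result is clamped to the
 * positive limit 0x7fff.
 */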
4173 /* Unsigned saturating arithmetic. */
4174 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
4183 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
4191 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
4200 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
4208 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
4209 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
4210 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
4211 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
4214 #include "op_addsub.h"
4216 /* Signed modulo arithmetic. */
4217 #define SARITH16(a, b, n, op) do { \
4219 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
4220 RESULT(sum, n, 16); \
4222 ge |= 3 << (n * 2); \
4225 #define SARITH8(a, b, n, op) do { \
4227 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
4228 RESULT(sum, n, 8); \
4234 #define ADD16(a, b, n) SARITH16(a, b, n, +)
4235 #define SUB16(a, b, n) SARITH16(a, b, n, -)
4236 #define ADD8(a, b, n) SARITH8(a, b, n, +)
4237 #define SUB8(a, b, n) SARITH8(a, b, n, -)
4241 #include "op_addsub.h"
4243 /* Unsigned modulo arithmetic. */
4244 #define ADD16(a, b, n) do { \
4246 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
4247 RESULT(sum, n, 16); \
4248 if ((sum >> 16) == 1) \
4249 ge |= 3 << (n * 2); \
4252 #define ADD8(a, b, n) do { \
4254 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
4255 RESULT(sum, n, 8); \
4256 if ((sum >> 8) == 1) \
4260 #define SUB16(a, b, n) do { \
4262 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
4263 RESULT(sum, n, 16); \
4264 if ((sum >> 16) == 0) \
4265 ge |= 3 << (n * 2); \
4268 #define SUB8(a, b, n) do { \
4270 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
4271 RESULT(sum, n, 8); \
4272 if ((sum >> 8) == 0) \
4279 #include "op_addsub.h"
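/* In the unsigned ADD16/SUB16/ADD8/SUB8 forms above the GE bits simply mirror
 * carry-out (for adds) or not-borrow (for subtracts): e.g. 0xffff + 0x0001
 * gives sum 0x10000, so (sum >> 16) == 1 and both GE bits for that halfword
 * are set.
 */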
4281 /* Halved signed arithmetic. */
4282 #define ADD16(a, b, n) \
4283 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
4284 #define SUB16(a, b, n) \
4285 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
4286 #define ADD8(a, b, n) \
4287 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
4288 #define SUB8(a, b, n) \
4289 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
4292 #include "op_addsub.h"
4294 /* Halved unsigned arithmetic. */
4295 #define ADD16(a, b, n) \
4296 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
4297 #define SUB16(a, b, n) \
4298 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
4299 #define ADD8(a, b, n) \
4300 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
4301 #define SUB8(a, b, n) \
4302 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
4305 #include "op_addsub.h"
4307 static inline uint8_t do_usad(uint8_t a, uint8_t b)
4315 /* Unsigned sum of absolute byte differences. */
4316 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
4319 sum = do_usad(a, b);
4320 sum += do_usad(a >> 8, b >> 8);
4321 sum += do_usad(a >> 16, b >> 16);
4322 sum += do_usad(a >> 24, b >> 24);
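/* e.g. usad8(0x01020304, 0x04030201) sums |4-1| + |3-2| + |2-3| + |1-4| == 8. */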
4326 /* For ARMv6 SEL instruction. */
4327 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
4340 return (a & mask) | (b & ~mask);
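/* The mask, built from the GE bits in the flags argument, holds 0xff in every
 * byte lane whose GE flag is set, so SEL takes that byte from 'a' and the
 * remaining bytes from 'b'.
 */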
4343 /* VFP support. We follow the convention used for VFP instructions:
4344 Single precision routines have a "s" suffix, double precision a "d" suffix. */
4347 /* Convert host exception flags to vfp form. */
4348 static inline int vfp_exceptbits_from_host(int host_bits)
4350 int target_bits = 0;
4352 if (host_bits & float_flag_invalid)
4354 if (host_bits & float_flag_divbyzero)
4356 if (host_bits & float_flag_overflow)
4358 if (host_bits & (float_flag_underflow | float_flag_output_denormal))
4360 if (host_bits & float_flag_inexact)
4361 target_bits |= 0x10;
4362 if (host_bits & float_flag_input_denormal)
4363 target_bits |= 0x80;
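/* These positions match the FPSCR cumulative exception bits: IOC (bit 0),
 * DZC (bit 1), OFC (bit 2), UFC (bit 3), IXC (bit 4) and IDC (bit 7).
 */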
4367 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
4372 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
4373 | (env->vfp.vec_len << 16)
4374 | (env->vfp.vec_stride << 20);
4375 i = get_float_exception_flags(&env->vfp.fp_status);
4376 i |= get_float_exception_flags(&env->vfp.standard_fp_status);
4377 fpscr |= vfp_exceptbits_from_host(i);
4381 uint32_t vfp_get_fpscr(CPUARMState *env)
4383 return HELPER(vfp_get_fpscr)(env);
4386 /* Convert vfp exception flags to target form. */
4387 static inline int vfp_exceptbits_to_host(int target_bits)
4391 if (target_bits & 1)
4392 host_bits |= float_flag_invalid;
4393 if (target_bits & 2)
4394 host_bits |= float_flag_divbyzero;
4395 if (target_bits & 4)
4396 host_bits |= float_flag_overflow;
4397 if (target_bits & 8)
4398 host_bits |= float_flag_underflow;
4399 if (target_bits & 0x10)
4400 host_bits |= float_flag_inexact;
4401 if (target_bits & 0x80)
4402 host_bits |= float_flag_input_denormal;
4406 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
4411 changed = env->vfp.xregs[ARM_VFP_FPSCR];
4412 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
4413 env->vfp.vec_len = (val >> 16) & 7;
4414 env->vfp.vec_stride = (val >> 20) & 3;
4417 if (changed & (3 << 22)) {
4418 i = (val >> 22) & 3;
4420 case FPROUNDING_TIEEVEN:
4421 i = float_round_nearest_even;
4423 case FPROUNDING_POSINF:
4426 case FPROUNDING_NEGINF:
4427 i = float_round_down;
4429 case FPROUNDING_ZERO:
4430 i = float_round_to_zero;
4433 set_float_rounding_mode(i, &env->vfp.fp_status);
4435 if (changed & (1 << 24)) {
4436 set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
4437 set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
4439 if (changed & (1 << 25))
4440 set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
4442 i = vfp_exceptbits_to_host(val);
4443 set_float_exception_flags(i, &env->vfp.fp_status);
4444 set_float_exception_flags(0, &env->vfp.standard_fp_status);
4447 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
4449 HELPER(vfp_set_fpscr)(env, val);
4452 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
4454 #define VFP_BINOP(name) \
4455 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
4457 float_status *fpst = fpstp; \
4458 return float32_ ## name(a, b, fpst); \
4460 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
4462 float_status *fpst = fpstp; \
4463 return float64_ ## name(a, b, fpst); \
4475 float32 VFP_HELPER(neg, s)(float32 a)
4477 return float32_chs(a);
4480 float64 VFP_HELPER(neg, d)(float64 a)
4482 return float64_chs(a);
4485 float32 VFP_HELPER(abs, s)(float32 a)
4487 return float32_abs(a);
4490 float64 VFP_HELPER(abs, d)(float64 a)
4492 return float64_abs(a);
4495 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
4497 return float32_sqrt(a, &env->vfp.fp_status);
4500 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
4502 return float64_sqrt(a, &env->vfp.fp_status);
4505 /* XXX: check quiet/signaling case */
4506 #define DO_VFP_cmp(p, type) \
4507 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
4510 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
4511 case 0: flags = 0x6; break; \
4512 case -1: flags = 0x8; break; \
4513 case 1: flags = 0x2; break; \
4514 default: case 2: flags = 0x3; break; \
4516 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
4517 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
4519 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
4522 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
4523 case 0: flags = 0x6; break; \
4524 case -1: flags = 0x8; break; \
4525 case 1: flags = 0x2; break; \
4526 default: case 2: flags = 0x3; break; \
4528 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
4529 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
4531 DO_VFP_cmp(s, float32)
4532 DO_VFP_cmp(d, float64)
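/* The flags value written to FPSCR[31:28] above is the NZCV result of the
 * compare: 0x6 (ZC) for equal, 0x8 (N) for less than, 0x2 (C) for greater
 * than and 0x3 (CV) for unordered operands.
 */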
4535 /* Integer to float and float to integer conversions */
4537 #define CONV_ITOF(name, fsz, sign) \
4538 float##fsz HELPER(name)(uint32_t x, void *fpstp) \
4540 float_status *fpst = fpstp; \
4541 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
4544 #define CONV_FTOI(name, fsz, sign, round) \
4545 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
4547 float_status *fpst = fpstp; \
4548 if (float##fsz##_is_any_nan(x)) { \
4549 float_raise(float_flag_invalid, fpst); \
4552 return float##fsz##_to_##sign##int32##round(x, fpst); \
4555 #define FLOAT_CONVS(name, p, fsz, sign) \
4556 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
4557 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
4558 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
4560 FLOAT_CONVS(si, s, 32, )
4561 FLOAT_CONVS(si, d, 64, )
4562 FLOAT_CONVS(ui, s, 32, u)
4563 FLOAT_CONVS(ui, d, 64, u)
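/* Each FLOAT_CONVS() line expands to three helpers, e.g. FLOAT_CONVS(si, s, 32, )
 * yields vfp_sitos (int to float), vfp_tosis (float to int using the current
 * rounding mode of the supplied float_status) and vfp_tosizs (round to zero).
 */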
4569 /* floating point conversion */
4570 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
4572 float64 r = float32_to_float64(x, &env->vfp.fp_status);
4573 /* ARM requires that S<->D conversion of any kind of NaN generates
4574 * a quiet NaN by forcing the most significant frac bit to 1.
4576 return float64_maybe_silence_nan(r);
4579 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
4581 float32 r = float64_to_float32(x, &env->vfp.fp_status);
4582 /* ARM requires that S<->D conversion of any kind of NaN generates
4583 * a quiet NaN by forcing the most significant frac bit to 1.
4585 return float32_maybe_silence_nan(r);
4588 /* VFP3 fixed point conversion. */
4589 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
4590 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
4593 float_status *fpst = fpstp; \
4595 tmp = itype##_to_##float##fsz(x, fpst); \
4596 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
4599 /* Notice that we want only input-denormal exception flags from the
4600 * scalbn operation: the other possible flags (overflow+inexact if
4601 * we overflow to infinity, output-denormal) aren't correct for the
4602 * complete scale-and-convert operation.
4604 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
4605 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
4609 float_status *fpst = fpstp; \
4610 int old_exc_flags = get_float_exception_flags(fpst); \
4612 if (float##fsz##_is_any_nan(x)) { \
4613 float_raise(float_flag_invalid, fpst); \
4616 tmp = float##fsz##_scalbn(x, shift, fpst); \
4617 old_exc_flags |= get_float_exception_flags(fpst) \
4618 & float_flag_input_denormal; \
4619 set_float_exception_flags(old_exc_flags, fpst); \
4620 return float##fsz##_to_##itype##round(tmp, fpst); \
4623 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \
4624 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
4625 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
4626 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
4628 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
4629 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
4630 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
4632 VFP_CONV_FIX(sh, d, 64, 64, int16)
4633 VFP_CONV_FIX(sl, d, 64, 64, int32)
4634 VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
4635 VFP_CONV_FIX(uh, d, 64, 64, uint16)
4636 VFP_CONV_FIX(ul, d, 64, 64, uint32)
4637 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
4638 VFP_CONV_FIX(sh, s, 32, 32, int16)
4639 VFP_CONV_FIX(sl, s, 32, 32, int32)
4640 VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
4641 VFP_CONV_FIX(uh, s, 32, 32, uint16)
4642 VFP_CONV_FIX(ul, s, 32, 32, uint32)
4643 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
4645 #undef VFP_CONV_FIX_FLOAT
4646 #undef VFP_CONV_FLOAT_FIX_ROUND
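/* In these fixed-point conversions 'shift' is the number of fraction bits:
 * fixed-to-float converts the raw integer and then scales by 2^-shift via
 * scalbn, while float-to-fixed scales by 2^shift before the final integer
 * conversion.
 */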
4648 /* Set the current fp rounding mode and return the old one.
4649 * The argument is a softfloat float_round_ value.
4651 uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
4653 float_status *fp_status = &env->vfp.fp_status;
4655 uint32_t prev_rmode = get_float_rounding_mode(fp_status);
4656 set_float_rounding_mode(rmode, fp_status);
4661 /* Set the current fp rounding mode in the standard fp status and return
4662 * the old one. This is for NEON instructions that need to change the
4663 * rounding mode but wish to use the standard FPSCR values for everything
4664 * else. Always set the rounding mode back to the correct value after emulation.
4666 * The argument is a softfloat float_round_ value.
4668 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
4670 float_status *fp_status = &env->vfp.standard_fp_status;
4672 uint32_t prev_rmode = get_float_rounding_mode(fp_status);
4673 set_float_rounding_mode(rmode, fp_status);
4678 /* Half precision conversions. */
4679 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
4681 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
4682 float32 r = float16_to_float32(make_float16(a), ieee, s);
4684 return float32_maybe_silence_nan(r);
4689 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
4691 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
4692 float16 r = float32_to_float16(a, ieee, s);
4694 r = float16_maybe_silence_nan(r);
4696 return float16_val(r);
4699 float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
4701 return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
4704 uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
4706 return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
4709 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
4711 return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
4714 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
4716 return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
4719 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
4721 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
4722 float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
4724 return float64_maybe_silence_nan(r);
4729 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
4731 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
4732 float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
4734 r = float16_maybe_silence_nan(r);
4736 return float16_val(r);
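/* In the half-precision helpers the 'ieee' flag is the inverse of FPSCR
 * bit 26 (the AHP bit): when alternative half-precision is selected the
 * softfloat conversions are told not to use the IEEE half format.
 */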
4739 #define float32_two make_float32(0x40000000)
4740 #define float32_three make_float32(0x40400000)
4741 #define float32_one_point_five make_float32(0x3fc00000)
4743 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
4745 float_status *s = &env->vfp.standard_fp_status;
4746 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
4747 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
4748 if (!(float32_is_zero(a) || float32_is_zero(b))) {
4749 float_raise(float_flag_input_denormal, s);
4753 return float32_sub(float32_two, float32_mul(a, b, s), s);
4756 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
4758 float_status *s = &env->vfp.standard_fp_status;
4760 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
4761 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
4762 if (!(float32_is_zero(a) || float32_is_zero(b))) {
4763 float_raise(float_flag_input_denormal, s);
4765 return float32_one_point_five;
4767 product = float32_mul(a, b, s);
4768 return float32_div(float32_sub(float32_three, product, s), float32_two, s);
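/* recps_f32 and rsqrts_f32 implement the VRECPS/VRSQRTS refinement steps
 * (2 - a*b) and (3 - a*b) / 2, which a caller can iterate Newton-Raphson
 * style to sharpen an initial VRECPE/VRSQRTE estimate.
 */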
4773 /* Constants 256 and 512 are used in some helpers; we avoid relying on
4774 * int->float conversions at run-time. */
4775 #define float64_256 make_float64(0x4070000000000000LL)
4776 #define float64_512 make_float64(0x4080000000000000LL)
4777 #define float32_maxnorm make_float32(0x7f7fffff)
4778 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
4780 /* Reciprocal functions
4782 * The algorithm that must be used to calculate the estimate
4783 * is specified by the ARM ARM, see FPRecipEstimate()
4786 static float64 recip_estimate(float64 a, float_status *real_fp_status)
4788 /* These calculations mustn't set any fp exception flags,
4789 * so we use a local copy of the fp_status.
4791 float_status dummy_status = *real_fp_status;
4792 float_status *s = &dummy_status;
4793 /* q = (int)(a * 512.0) */
4794 float64 q = float64_mul(float64_512, a, s);
4795 int64_t q_int = float64_to_int64_round_to_zero(q, s);
4797 /* r = 1.0 / (((double)q + 0.5) / 512.0) */
4798 q = int64_to_float64(q_int, s);
4799 q = float64_add(q, float64_half, s);
4800 q = float64_div(q, float64_512, s);
4801 q = float64_div(float64_one, q, s);
4803 /* s = (int)(256.0 * r + 0.5) */
4804 q = float64_mul(q, float64_256, s);
4805 q = float64_add(q, float64_half, s);
4806 q_int = float64_to_int64_round_to_zero(q, s);
4808 /* return (double)s / 256.0 */
4809 return float64_div(int64_to_float64(q_int, s), float64_256, s);
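/* Worked example: for a == 0.5, q_int == 256, r == 1/(256.5/512) ~= 1.9961,
 * and the returned estimate is 511/256, i.e. the reciprocal quantised to a
 * resolution of 1/256 as FPRecipEstimate() requires.
 */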
4812 /* Common wrapper to call recip_estimate */
4813 static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
4815 uint64_t val64 = float64_val(num);
4816 uint64_t frac = extract64(val64, 0, 52);
4817 int64_t exp = extract64(val64, 52, 11);
4819 float64 scaled, estimate;
4821 /* Generate the scaled number for the estimate function */
4823 if (extract64(frac, 51, 1) == 0) {
4825 frac = extract64(frac, 0, 50) << 2;
4827 frac = extract64(frac, 0, 51) << 1;
4831 /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
4832 scaled = make_float64((0x3feULL << 52)
4833 | extract64(frac, 44, 8) << 44);
4835 estimate = recip_estimate(scaled, fpst);
4837 /* Build new result */
4838 val64 = float64_val(estimate);
4839 sbit = 0x8000000000000000ULL & val64;
4841 frac = extract64(val64, 0, 52);
4844 frac = 1ULL << 51 | extract64(frac, 1, 51);
4845 } else if (exp == -1) {
4846 frac = 1ULL << 50 | extract64(frac, 2, 50);
4850 return make_float64(sbit | (exp << 52) | frac);
4853 static bool round_to_inf(float_status *fpst, bool sign_bit)
4855 switch (fpst->float_rounding_mode) {
4856 case float_round_nearest_even: /* Round to Nearest */
4858 case float_round_up: /* Round to +Inf */
4860 case float_round_down: /* Round to -Inf */
4862 case float_round_to_zero: /* Round to Zero */
4866 g_assert_not_reached();
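/* round_to_inf() is expected to return true when an overflowed result should
 * become infinity rather than the largest normal number: always for
 * round-to-nearest, never for round-to-zero, and for the directed modes only
 * when the rounding direction matches the sign of the result.
 */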
4869 float32 HELPER(recpe_f32)(float32 input, void *fpstp)
4871 float_status *fpst = fpstp;
4872 float32 f32 = float32_squash_input_denormal(input, fpst);
4873 uint32_t f32_val = float32_val(f32);
4874 uint32_t f32_sbit = 0x80000000ULL & f32_val;
4875 int32_t f32_exp = extract32(f32_val, 23, 8);
4876 uint32_t f32_frac = extract32(f32_val, 0, 23);
4882 if (float32_is_any_nan(f32)) {
4884 if (float32_is_signaling_nan(f32)) {
4885 float_raise(float_flag_invalid, fpst);
4886 nan = float32_maybe_silence_nan(f32);
4888 if (fpst->default_nan_mode) {
4889 nan = float32_default_nan;
4892 } else if (float32_is_infinity(f32)) {
4893 return float32_set_sign(float32_zero, float32_is_neg(f32));
4894 } else if (float32_is_zero(f32)) {
4895 float_raise(float_flag_divbyzero, fpst);
4896 return float32_set_sign(float32_infinity, float32_is_neg(f32));
4897 } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
4898 /* Abs(value) < 2.0^-128 */
4899 float_raise(float_flag_overflow | float_flag_inexact, fpst);
4900 if (round_to_inf(fpst, f32_sbit)) {
4901 return float32_set_sign(float32_infinity, float32_is_neg(f32));
4903 return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
4905 } else if (f32_exp >= 253 && fpst->flush_to_zero) {
4906 float_raise(float_flag_underflow, fpst);
4907 return float32_set_sign(float32_zero, float32_is_neg(f32));
4911 f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
4912 r64 = call_recip_estimate(f64, 253, fpst);
4913 r64_val = float64_val(r64);
4914 r64_exp = extract64(r64_val, 52, 11);
4915 r64_frac = extract64(r64_val, 0, 52);
4917 /* result = sign : result_exp<7:0> : fraction<51:29>; */
4918 return make_float32(f32_sbit |
4919 (r64_exp & 0xff) << 23 |
4920 extract64(r64_frac, 29, 24));
4923 float64 HELPER(recpe_f64)(float64 input, void *fpstp)
4925 float_status *fpst = fpstp;
4926 float64 f64 = float64_squash_input_denormal(input, fpst);
4927 uint64_t f64_val = float64_val(f64);
4928 uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
4929 int64_t f64_exp = extract64(f64_val, 52, 11);
4935 /* Deal with any special cases */
4936 if (float64_is_any_nan(f64)) {
4938 if (float64_is_signaling_nan(f64)) {
4939 float_raise(float_flag_invalid, fpst);
4940 nan = float64_maybe_silence_nan(f64);
4942 if (fpst->default_nan_mode) {
4943 nan = float64_default_nan;
4946 } else if (float64_is_infinity(f64)) {
4947 return float64_set_sign(float64_zero, float64_is_neg(f64));
4948 } else if (float64_is_zero(f64)) {
4949 float_raise(float_flag_divbyzero, fpst);
4950 return float64_set_sign(float64_infinity, float64_is_neg(f64));
4951 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
4952 /* Abs(value) < 2.0^-1024 */
4953 float_raise(float_flag_overflow | float_flag_inexact, fpst);
4954 if (round_to_inf(fpst, f64_sbit)) {
4955 return float64_set_sign(float64_infinity, float64_is_neg(f64));
4957 return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
4959 } else if (f64_exp >= 1023 && fpst->flush_to_zero) {
4960 float_raise(float_flag_underflow, fpst);
4961 return float64_set_sign(float64_zero, float64_is_neg(f64));
4964 r64 = call_recip_estimate(f64, 2045, fpst);
4965 r64_val = float64_val(r64);
4966 r64_exp = extract64(r64_val, 52, 11);
4967 r64_frac = extract64(r64_val, 0, 52);
4969 /* result = sign : result_exp<10:0> : fraction<51:0> */
4970 return make_float64(f64_sbit |
4971 ((r64_exp & 0x7ff) << 52) |
4975 /* The algorithm that must be used to calculate the estimate
4976 * is specified by the ARM ARM.
4978 static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
4980 /* These calculations mustn't set any fp exception flags,
4981 * so we use a local copy of the fp_status.
4983 float_status dummy_status = *real_fp_status;
4984 float_status *s = &dummy_status;
4988 if (float64_lt(a, float64_half, s)) {
4989 /* range 0.25 <= a < 0.5 */
4991 /* a in units of 1/512 rounded down */
4992 /* q0 = (int)(a * 512.0); */
4993 q = float64_mul(float64_512, a, s);
4994 q_int = float64_to_int64_round_to_zero(q, s);
4996 /* reciprocal root r */
4997 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
4998 q = int64_to_float64(q_int, s);
4999 q = float64_add(q, float64_half, s);
5000 q = float64_div(q, float64_512, s);
5001 q = float64_sqrt(q, s);
5002 q = float64_div(float64_one, q, s);
5004 /* range 0.5 <= a < 1.0 */
5006 /* a in units of 1/256 rounded down */
5007 /* q1 = (int)(a * 256.0); */
5008 q = float64_mul(float64_256, a, s);
5009 int64_t q_int = float64_to_int64_round_to_zero(q, s);
5011 /* reciprocal root r */
5012 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
5013 q = int64_to_float64(q_int, s);
5014 q = float64_add(q, float64_half, s);
5015 q = float64_div(q, float64_256, s);
5016 q = float64_sqrt(q, s);
5017 q = float64_div(float64_one, q, s);
5019 /* r in units of 1/256 rounded to nearest */
5020 /* s = (int)(256.0 * r + 0.5); */
5022 q = float64_mul(q, float64_256, s);
5023 q = float64_add(q, float64_half, s);
5024 q_int = float64_to_int64_round_to_zero(q, s);
5026 /* return (double)s / 256.0;*/
5027 return float64_div(int64_to_float64(q_int, s), float64_256, s);
5030 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
5032 float_status *s = fpstp;
5033 float32 f32 = float32_squash_input_denormal(input, s);
5034 uint32_t val = float32_val(f32);
5035 uint32_t f32_sbit = 0x80000000 & val;
5036 int32_t f32_exp = extract32(val, 23, 8);
5037 uint32_t f32_frac = extract32(val, 0, 23);
5043 if (float32_is_any_nan(f32)) {
5045 if (float32_is_signaling_nan(f32)) {
5046 float_raise(float_flag_invalid, s);
5047 nan = float32_maybe_silence_nan(f32);
5049 if (s->default_nan_mode) {
5050 nan = float32_default_nan;
5053 } else if (float32_is_zero(f32)) {
5054 float_raise(float_flag_divbyzero, s);
5055 return float32_set_sign(float32_infinity, float32_is_neg(f32));
5056 } else if (float32_is_neg(f32)) {
5057 float_raise(float_flag_invalid, s);
5058 return float32_default_nan;
5059 } else if (float32_is_infinity(f32)) {
5060 return float32_zero;
5063 /* Scale and normalize to a double-precision value between 0.25 and 1.0,
5064 * preserving the parity of the exponent. */
5066 f64_frac = ((uint64_t) f32_frac) << 29;
5068 while (extract64(f64_frac, 51, 1) == 0) {
5069 f64_frac = f64_frac << 1;
5070 f32_exp = f32_exp - 1;
5072 f64_frac = extract64(f64_frac, 0, 51) << 1;
5075 if (extract64(f32_exp, 0, 1) == 0) {
5076 f64 = make_float64(((uint64_t) f32_sbit) << 32
5080 f64 = make_float64(((uint64_t) f32_sbit) << 32
5085 result_exp = (380 - f32_exp) / 2;
5087 f64 = recip_sqrt_estimate(f64, s);
5089 val64 = float64_val(f64);
5091 val = ((result_exp & 0xff) << 23)
5092 | ((val64 >> 29) & 0x7fffff);
5093 return make_float32(val);
5096 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
5098 float_status *s = fpstp;
5099 float64 f64 = float64_squash_input_denormal(input, s);
5100 uint64_t val = float64_val(f64);
5101 uint64_t f64_sbit = 0x8000000000000000ULL & val;
5102 int64_t f64_exp = extract64(val, 52, 11);
5103 uint64_t f64_frac = extract64(val, 0, 52);
5105 uint64_t result_frac;
5107 if (float64_is_any_nan(f64)) {
5109 if (float64_is_signaling_nan(f64)) {
5110 float_raise(float_flag_invalid, s);
5111 nan = float64_maybe_silence_nan(f64);
5113 if (s->default_nan_mode) {
5114 nan = float64_default_nan;
5117 } else if (float64_is_zero(f64)) {
5118 float_raise(float_flag_divbyzero, s);
5119 return float64_set_sign(float64_infinity, float64_is_neg(f64));
5120 } else if (float64_is_neg(f64)) {
5121 float_raise(float_flag_invalid, s);
5122 return float64_default_nan;
5123 } else if (float64_is_infinity(f64)) {
5124 return float64_zero;
5127 /* Scale and normalize to a double-precision value between 0.25 and 1.0,
5128 * preserving the parity of the exponent. */
5131 while (extract64(f64_frac, 51, 1) == 0) {
5132 f64_frac = f64_frac << 1;
5133 f64_exp = f64_exp - 1;
5135 f64_frac = extract64(f64_frac, 0, 51) << 1;
5138 if (extract64(f64_exp, 0, 1) == 0) {
5139 f64 = make_float64(f64_sbit
5143 f64 = make_float64(f64_sbit
5148 result_exp = (3068 - f64_exp) / 2;
5150 f64 = recip_sqrt_estimate(f64, s);
5152 result_frac = extract64(float64_val(f64), 0, 52);
5154 return make_float64(f64_sbit |
5155 ((result_exp & 0x7ff) << 52) |
5159 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
5161 float_status *s = fpstp;
5164 if ((a & 0x80000000) == 0) {
5168 f64 = make_float64((0x3feULL << 52)
5169 | ((int64_t)(a & 0x7fffffff) << 21));
5171 f64 = recip_estimate(f64, s);
5173 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
5176 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
5178 float_status *fpst = fpstp;
5181 if ((a & 0xc0000000) == 0) {
5185 if (a & 0x80000000) {
5186 f64 = make_float64((0x3feULL << 52)
5187 | ((uint64_t)(a & 0x7fffffff) << 21));
5188 } else { /* bits 31-30 == '01' */
5189 f64 = make_float64((0x3fdULL << 52)
5190 | ((uint64_t)(a & 0x3fffffff) << 22));
5193 f64 = recip_sqrt_estimate(f64, fpst);
5195 return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
5198 /* VFPv4 fused multiply-accumulate */
5199 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
5201 float_status *fpst = fpstp;
5202 return float32_muladd(a, b, c, 0, fpst);
5205 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
5207 float_status *fpst = fpstp;
5208 return float64_muladd(a, b, c, 0, fpst);
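/* float32_muladd/float64_muladd compute a*b + c with a single rounding,
 * which is what distinguishes the VFPv4 fused operations from a separate
 * multiply followed by an add.
 */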
5211 /* ARMv8 round to integral */
5212 float32 HELPER(rints_exact)(float32 x, void *fp_status)
5214 return float32_round_to_int(x, fp_status);
5217 float64 HELPER(rintd_exact)(float64 x, void *fp_status)
5219 return float64_round_to_int(x, fp_status);
5222 float32 HELPER(rints)(float32 x, void *fp_status)
5224 int old_flags = get_float_exception_flags(fp_status), new_flags;
5227 ret = float32_round_to_int(x, fp_status);
5229 /* Suppress any inexact exceptions the conversion produced */
5230 if (!(old_flags & float_flag_inexact)) {
5231 new_flags = get_float_exception_flags(fp_status);
5232 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
5238 float64 HELPER(rintd)(float64 x, void *fp_status)
5240 int old_flags = get_float_exception_flags(fp_status), new_flags;
5243 ret = float64_round_to_int(x, fp_status);
5245 new_flags = get_float_exception_flags(fp_status);
5247 /* Suppress any inexact exceptions the conversion produced */
5248 if (!(old_flags & float_flag_inexact)) {
5249 new_flags = get_float_exception_flags(fp_status);
5250 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
5256 /* Convert ARM rounding mode to softfloat */
5257 int arm_rmode_to_sf(int rmode)
5260 case FPROUNDING_TIEAWAY:
5261 rmode = float_round_ties_away;
5263 case FPROUNDING_ODD:
5264 /* FIXME: add support for ODD */
5265 qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
5267 case FPROUNDING_TIEEVEN:
5269 rmode = float_round_nearest_even;
5271 case FPROUNDING_POSINF:
5272 rmode = float_round_up;
5274 case FPROUNDING_NEGINF:
5275 rmode = float_round_down;
5277 case FPROUNDING_ZERO:
5278 rmode = float_round_to_zero;
5284 static void crc_init_buffer(uint8_t *buf, uint32_t val, uint32_t bytes)
5289 buf[0] = val & 0xff;
5290 } else if (bytes == 2) {
5291 buf[0] = val & 0xff;
5292 buf[1] = (val >> 8) & 0xff;
5294 buf[0] = val & 0xff;
5295 buf[1] = (val >> 8) & 0xff;
5296 buf[2] = (val >> 16) & 0xff;
5297 buf[3] = (val >> 24) & 0xff;
5301 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
5305 crc_init_buffer(buf, val, bytes);
5307 /* zlib crc32 converts the accumulator and output to one's complement. */
5308 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
5311 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
5315 crc_init_buffer(buf, val, bytes);
5317 /* Linux crc32c converts the output to one's complement. */
5318 return crc32c(acc, buf, bytes) ^ 0xffffffff;