 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "exec/cpu_ldst.h"
#include "hw/semihosting/common-semi.h"
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

#endif

static void switch_mode(CPUARMState *env, int mode);
static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
    }
    return 0;
}
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
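/*
 * For context (a hedged sketch; the call is made from the ARM gdbstub
 * registration code, not from this file): these get/set callbacks are
 * handed to the gdbstub roughly as
 *
 *     gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
 *                              nregs, "arm-vfp.xml", 0);
 *
 * where nregs and the XML description must match the register layout
 * these callbacks implement.
 */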
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        q[0] = ldq_le_p(buf);
        q[1] = ldq_le_p(buf + 8);
        return 16;
    }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}
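/*
 * Typical caller pattern (a sketch, not a call made here): because
 * write_raw_cp_reg() silently ignores writes to ARM_CP_CONST registers,
 * callers that need to know whether the write "took" verify it with a
 * readback:
 *
 *     write_raw_cp_reg(env, ri, v);
 *     if (read_raw_cp_reg(env, ri) != v) {
 *         ... treat as failed ...
 *     }
 *
 * write_list_to_cpustate() below is the canonical user of this idiom.
 */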
/**
 * arm_get/set_gdb_*: get/set a gdb register
 * @env: the CPU state
 * @buf: a buffer to copy to/from
 * @reg: register number (offset from start of group)
 *
 * We return the number of bytes copied
 */
static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    /* Writes to system registers via the gdbstub are not supported */
    return 0;
}
#ifdef TARGET_AARCH64
static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            len += gdb_get_reg128(buf,
                                  env->vfp.zregs[reg].d[vq * 2 + 1],
                                  env->vfp.zregs[reg].d[vq * 2]);
        }
        return len;
    }
    case 32:
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
        }
        return len;
    }
    case 51:
    {
        /*
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg64(buf, vq * 2);
    }
    default:
        /* gdbstub asked for something out of our range */
        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
        break;
    }

    return 0;
}
static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);

    switch (reg) {
    /* The first 32 registers are the zregs */
    case 0 ... 31:
    {
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
            env->vfp.zregs[reg].d[vq * 2] = *p++;
            len += 16;
        }
        return len;
    }
    case 32:
        vfp_set_fpsr(env, *(uint32_t *)buf);
        return 4;
    case 33:
        vfp_set_fpcr(env, *(uint32_t *)buf);
        return 4;
    /* then 16 predicates and the ffr */
    case 34 ... 50:
    {
        int preg = reg - 34;
        int vq, len = 0;
        uint64_t *p = (uint64_t *) buf;
        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
            env->vfp.pregs[preg].p[vq / 4] = *p++;
            len += 8;
        }
        return len;
    }
    case 51:
        /* cannot set vg via gdbstub */
        return 0;
    default:
        /* gdbstub asked for something out of our range */
        break;
    }

    return 0;
}
#endif /* TARGET_AARCH64 */
static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}
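/*
 * Illustrative case (not exhaustive): a regdef with only a .readfn, no
 * .writefn and no .fieldoffset, makes this return true, because
 * write_raw_cp_reg() would fall through to raw_write() and trip its
 * assert(ri->fieldoffset); such a regdef must carry ARM_CP_NO_RAW.
 */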
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
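/*
 * Usage note (hedged; the call sites live outside this file, in the ARM
 * vmstate/KVM sync code): the pair above is used around migration roughly
 * as
 *
 *     write_cpustate_to_list(cpu, false);   (pre-save: env -> list)
 *     ... the cpreg index/value arrays are migrated ...
 *     write_list_to_cpustate(cpu);          (post-load: list -> env)
 *
 * so a value that fails the readback check surfaces as a failed load.
 */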
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}
static uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}
/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA.
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
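/*
 * These check functions are wired up via the .accessfn field of an
 * ARMCPRegInfo entry, e.g. (a sketch; real uses appear in the reginfo
 * tables below):
 *
 *     { .name = "DACR", ..., .access = PL1_RW,
 *       .accessfn = access_tvm_trvm, ... },
 *
 * so the trap check runs on every guest access to the register.
 */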
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}
/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}
static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}
static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}
static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
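/*
 * Worked example (illustrative): with PMCR.N == 4 event counters,
 * pmu_counter_mask() returns (1 << 31) | ((1 << 4) - 1) == 0x8000000f:
 * bit 31 is the cycle counter (C) bit and bits [3:0] cover the four
 * event counters.
 */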
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    /* There is no overflow limit for SW_INCR */
    return -1;
}
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, simply return 0.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}
#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif
static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};
/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];
/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}
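/*
 * Worked example (illustrative): event 0x011 (CPU_CYCLES) has bit 5 clear,
 * so it sets bit (0x11 & 0x1f) == 17 of pmceid0; event 0x023
 * (STALL_FRONTEND) has bit 5 set, so it lands in pmceid1 at bit
 * (0x23 & 0x1f) == 3.
 */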
/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}
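/*
 * For reference: the PMUSERENR_EL0 bits tested by the helpers above are
 * EN (bit 0), SW (bit 1), CR (bit 2) and ER (bit 3), which grant EL0
 * access to the PMU in general, to PMSWINC writes, to PMCCNTR reads and
 * to event counter/PMSELR reads respectively.
 */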
/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
    uint8_t hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
          arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}
/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}
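/*
 * The op_start/op_finish pair brackets any operation that observes or
 * reconfigures the counter; pmccntr_read() below is the simplest user:
 *
 *     pmccntr_op_start(env);
 *     ret = env->cp15.c15_ccnt;
 *     pmccntr_op_finish(env);
 */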
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint64_t count = 0;
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;

    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}
void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}
static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE. */
        return 0;
    }
}
static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only the bits for implemented counters (plus the C bit) can change */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
            value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        }
        valid_mask &= ~SCR_NET;

        if (cpu_isar_feature(aa64_lor, cpu)) {
            valid_mask |= SCR_TLOR;
        }
        if (cpu_isar_feature(aa64_pauth, cpu)) {
            valid_mask |= SCR_API | SCR_APK;
        }
        if (cpu_isar_feature(aa64_sel2, cpu)) {
            valid_mask |= SCR_EEL2;
        }
        if (cpu_isar_feature(aa64_mte, cpu)) {
            valid_mask |= SCR_ATA;
        }
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits. */
    value &= valid_mask;
    raw_write(env, ri, value);
}
static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * scr_write will set the RES1 bits on an AArch64-only CPU.
     * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
     */
    scr_write(env, ri, 0);
}
static CPAccessResult access_aa64_tid2(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank.
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = env_cpu(env);
    bool el1 = arm_current_el(env) == 1;
    uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Only the XED bit (bit 0) of TEECR is implemented. */
    value &= 1;
    env->teecr = value;
}
static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R | PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R | PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
     * Writable only at the highest implemented exception level.
     */
    int el = arm_current_el(env);
    uint64_t hcr;
    uint32_t cntkctl;

    switch (el) {
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            cntkctl = env->cp15.cnthctl_el2;
        } else {
            cntkctl = env->cp15.c14_cntkctl;
        }
        if (!extract32(cntkctl, 0, 2)) {
            return CP_ACCESS_TRAP;
        }
        break;
    case 1:
        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
            arm_is_secure_below_el3(env)) {
            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
        break;
    case 2:
    case 3:
        break;
    }

    if (!isread && el < arm_highest_el(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }

    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
        if (hcr & HCR_E2H) {
            if (timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        } else {
            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
            if (has_el2 && timeridx == GTIMER_PHYS &&
                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
                return CP_ACCESS_TRAP_EL2;
            }
        }
        break;

    case 1:
        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
        if (has_el2 && timeridx == GTIMER_PHYS &&
            (hcr & HCR_E2H
             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
            return CP_ACCESS_TRAP_EL2;
        }
        break;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
                                      bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool has_el2 = arm_is_el2_enabled(env);
    uint64_t hcr = arm_hcr_el2_eff(env);

    switch (cur_el) {
    case 0:
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
        }

        /*
         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
         * EL0 if EL0[PV]TEN is zero.
         */
        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
            return CP_ACCESS_TRAP;
        }

        /* fall through */
    case 1:
        if (has_el2 && timeridx == GTIMER_PHYS) {
            if (hcr & HCR_E2H) {
                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        }
        break;
    }
    return CP_ACCESS_OK;
}
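
/*
 * Note on the "9 - timeridx" extracts above: with GTIMER_PHYS == 0 and
 * GTIMER_VIRT == 1 (per cpu.h), this selects CNTKCTL/CNTHCTL_EL2 bit 9
 * (EL0PTEN) for the physical timer and bit 8 (EL0VTEN) for the virtual
 * timer; the counter-enable checks likewise use bits 0 and 1.
 */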
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    return gt_counter_access(env, GTIMER_VIRT, isread);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_PHYS, isread);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
static CPAccessResult gt_stimer_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* The AArch64 register view of the secure physical timer is
     * always accessible from EL3, and configurably accessible from
     * Secure EL1.
     */
    switch (arm_current_el(env)) {
    case 1:
        if (!arm_is_secure(env)) {
            return CP_ACCESS_TRAP;
        }
        if (!(env->cp15.scr_el3 & SCR_ST)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    case 0:
    case 2:
        return CP_ACCESS_TRAP;
    case 3:
        return CP_ACCESS_OK;
    default:
        g_assert_not_reached();
    }
}
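
/*
 * The system counter value is derived from QEMU's virtual clock: the
 * nanoseconds elapsed divided by the tick period in nanoseconds. For
 * example, at the default 62.5 MHz emulated frequency the period is
 * 16 ns, so 1 ms of virtual time corresponds to 62500 ticks.
 */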
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
}
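
/*
 * In outline, gt_recalc_timer() below recomputes a timer's state after
 * anything feeding into it changes (CVAL, CTL, CNTVOFF_EL2, or the
 * QEMUTimer firing): ISTATUS = (counter - offset) >= CVAL, the output
 * IRQ line is ISTATUS && !IMASK, and the QEMUTimer is re-armed for the
 * next point at which ISTATUS will change.
 */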
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
        } else {
            timer_mod(cpu->gt_timer[timeridx], nexttick);
        }
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
                           int timeridx)
{
    ARMCPU *cpu = env_archcpu(env);

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}
static uint64_t gt_virt_cnt_offset(CPUARMState *env)
{
    uint64_t hcr;

    switch (arm_current_el(env)) {
    case 2:
        hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_E2H) {
            return 0;
        }
        break;
    case 0:
        hcr = arm_hcr_el2_eff(env);
        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
            return 0;
        }
        break;
    }

    return env->cp15.cntvoff_el2;
}
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                             int timeridx)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      (gt_get_countervalue(env) - offset));
}
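
/*
 * TVAL is a signed 32-bit downcounter view of the same state:
 * TVAL = CVAL - (counter - offset). Writing 100 to TVAL via the helper
 * below sets CVAL so that the timer fires 100 ticks from now; negative
 * values (sign-extended from 32 bits) describe a deadline in the past.
 */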
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = 0;

    switch (timeridx) {
    case GTIMER_VIRT:
    case GTIMER_HYPVIRT:
        offset = gt_virt_cnt_offset(env);
        break;
    }

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
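
/*
 * CNT*_CTL bit layout: bit 0 is ENABLE, bit 1 is IMASK and bit 2 is the
 * read-only ISTATUS. gt_ctl_write() below deposits only the two
 * writable bits; ISTATUS is recomputed by gt_recalc_timer().
 */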
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
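
/*
 * With HCR_EL2.E2H set (FEAT_VHE), CNTP_* and CNTV_* accesses made from
 * an EL2&0 translation regime are redirected to the EL2 timers. The
 * *_redir_* helpers below pick the backing timer index accordingly.
 */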
static int gt_phys_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYP;
    default:
        return GTIMER_PHYS;
    }
}

static int gt_virt_redir_timeridx(CPUARMState *env)
{
    switch (arm_mmu_idx(env)) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return GTIMER_HYPVIRT;
    default:
        return GTIMER_VIRT;
    }
}
static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_phys_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_phys_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}

static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].cval;
}

static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_cval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
                                        const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return gt_tval_read(env, ri, timeridx);
}

static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_tval_write(env, ri, timeridx, value);
}

static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
                                       const ARMCPRegInfo *ri)
{
    int timeridx = gt_virt_redir_timeridx(env);
    return env->cp15.c14_timer[timeridx].ctl;
}

static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    int timeridx = gt_virt_redir_timeridx(env);
    gt_ctl_write(env, ri, timeridx, value);
}

static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}

static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}

static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
}

static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
}

static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
}

static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}

void arm_gt_hvtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
}

static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    ARMCPU *cpu = env_archcpu(env);

    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_gt_cntfrq_reset,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
#else

/* In user-mode most of the generic timer registers are inaccessible
 * however modern kernels (4.12+) allow access to cntvct_el0
 */

static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);

    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
}
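
/*
 * The ARM_CP_CONST reset value below therefore pins the advertised
 * frequency at NANOSECONDS_PER_SECOND / GTIMER_SCALE, i.e. 62.5 MHz
 * with the default GTIMER_SCALE of 16.
 */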
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        raw_write(env, ri, value);
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        raw_write(env, ri, value & 0xfffff6ff);
    } else {
        raw_write(env, ri, value & 0xfffff1ff);
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */
static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (ri->opc2 & 4) {
        /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in
         * Secure EL1 (which can only happen if EL3 is AArch64).
         * They are simply UNDEF if executed from NS EL1.
         * They function normally from EL2 or EL3.
         */
        if (arm_current_el(env) == 1) {
            if (arm_is_secure_below_el3(env)) {
                if (env->cp15.scr_el3 & SCR_EEL2) {
                    return CP_ACCESS_TRAP_UNCATEGORIZED_EL2;
                }
                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
            }
            return CP_ACCESS_TRAP_UNCATEGORIZED;
        }
    }
    return CP_ACCESS_OK;
}
#ifdef CONFIG_TCG
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (ret) {
        /*
         * Some kinds of translation fault must cause exceptions rather
         * than being reported in the PAR.
         */
        int current_el = arm_current_el(env);
        int target_el;
        uint32_t syn, fsr, fsc;
        bool take_exc = false;

        if (fi.s1ptw && current_el == 1
            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
            /*
             * Synchronous stage 2 fault on an access made as part of the
             * translation table walk for AT S1E0* or AT S1E1* insn
             * executed from NS EL1. If this is a synchronous external abort
             * and SCR_EL3.EA == 1, then we take a synchronous external abort
             * to EL3. Otherwise the fault is taken as an exception to EL2,
             * and HPFAR_EL2 holds the faulting IPA.
             */
            if (fi.type == ARMFault_SyncExternalOnWalk &&
                (env->cp15.scr_el3 & SCR_EA)) {
                target_el = 3;
            } else {
                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
                if (arm_is_secure_below_el3(env) && fi.s1ns) {
                    env->cp15.hpfar_el2 |= HPFAR_NS;
                }
                target_el = 2;
            }
            take_exc = true;
        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
            /*
             * Synchronous external aborts during a translation table walk
             * are taken as Data Abort exceptions.
             */
            if (fi.stage2) {
                if (current_el == 3) {
                    target_el = 3;
                } else {
                    target_el = 2;
                }
            } else {
                target_el = exception_target_el(env);
            }
            take_exc = true;
        }

        if (take_exc) {
            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
                fsr = arm_fi_to_lfsc(&fi);
                fsc = extract32(fsr, 0, 6);
            } else {
                fsr = arm_fi_to_sfsc(&fi);
                fsc = 0x3f;
            }
            /*
             * Report exception with ESR indicating a fault due to a
             * translation table walk for a cache maintenance instruction.
             */
            syn = syn_data_abort_no_iss(current_el == target_el, 0,
                                        fi.ea, 1, fi.s1ptw, 1, fsc);
            env->exception.vaddress = value;
            env->exception.fsr = fsr;
            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
        }
    }

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_E10_0 ||
                mmu_idx == ARMMMUIdx_E10_1 ||
                mmu_idx == ARMMMUIdx_E10_1_PAN) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
#endif /* CONFIG_TCG */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE3;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            /* fall through */
        case 1:
            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_SE10_0;
            break;
        case 2:
            g_assert(!secure);  /* ARMv8.4-SecEL2 is 64-bit only */
            mmu_idx = ARMMMUIdx_Stage1_E0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_E10_1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}

static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;

    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 3 &&
        !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
#ifdef CONFIG_TCG
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
                mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
                           : ARMMMUIdx_Stage1_E1_PAN);
            } else {
                mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
            }
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_SE3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
#else
    /* Handled by hardware accelerator. */
    g_assert_not_reached();
#endif /* CONFIG_TCG */
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
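
/*
 * Worked example of the repacking above: the simple view packs eight
 * 2-bit AP fields at bit positions 2n, the extended view at positions
 * 4n. A simple value of 0x6 (field 0 = 2, field 1 = 1) pads out to
 * extended 0x12, and 0x12 folds back to 0x6.
 */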
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return 0;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    return *u32p;
}

static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);

    if (!u32p) {
        return;
    }

    u32p += env->pmsav7.rnr[M_REG_NS];
    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
    *u32p = value;
}

static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t nrgs = cpu->pmsav7_dregion;

    if (value >= nrgs) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
        return;
    }

    raw_write(env, ri, value);
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
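
/*
 * Example of the mask computation above: for TTBCR.N == 2 the TTBR0
 * table covers the bottom 1GB of the address space, so tcr->mask is
 * 0xc0000000 (the VA bits that select TTBR1) and tcr->base_mask is
 * 0xfffff000 (the valid TTBR0 base address bits).
 */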
3960 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3963 ARMCPU *cpu = env_archcpu(env);
3964 TCR *tcr = raw_ptr(env, ri);
3966 if (arm_feature(env, ARM_FEATURE_LPAE)) {
3967 /* With LPAE the TTBCR could result in a change of ASID
3968 * via the TTBCR.A1 bit, so do a TLB flush.
3969 */
3970 tlb_flush(CPU(cpu));
3972 /* Preserve the high half of TCR_EL1, set via TTBCR2. */
3973 value = deposit64(tcr->raw_tcr, 0, 32, value);
3974 vmsa_ttbcr_raw_write(env, ri, value);
3977 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3979 TCR *tcr = raw_ptr(env, ri);
3981 /* Reset both the TCR as well as the masks corresponding to the bank of
3982 * the TCR being reset.
3983 */
3984 tcr->raw_tcr = 0;
3985 tcr->mask = 0;
3986 tcr->base_mask = 0xffffc000u;
3987 }
3989 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
3992 ARMCPU *cpu = env_archcpu(env);
3993 TCR *tcr = raw_ptr(env, ri);
3995 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3996 tlb_flush(CPU(cpu));
3997 tcr->raw_tcr = value;
4000 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4003 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
4004 if (cpreg_field_is_64bit(ri) &&
4005 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4006 ARMCPU *cpu = env_archcpu(env);
4007 tlb_flush(CPU(cpu));
4009 raw_write(env, ri, value);
4010 }
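/*
 * Sketch of the check above (illustrative values, not from the source):
 * the ASID lives in TTBRn bits [63:48], so extract64(old ^ new, 48, 16)
 * is non-zero exactly when the ASID field changes. Writing
 * 0x0002000000001000 over 0x0001000000001000 changes only the ASID
 * (1 -> 2) and therefore flushes; rewriting the same table base with the
 * same ASID does not.
 */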
4012 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4015 /*
4016 * If we are running with E2&0 regime, then an ASID is active.
4017 * Flush if that might be changing. Note we're not checking
4018 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
4019 * holds the active ASID, only checking the field that might.
4020 */
4021 if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
4022 (arm_hcr_el2_eff(env) & HCR_E2H)) {
4023 uint16_t mask = ARMMMUIdxBit_E20_2 |
4024 ARMMMUIdxBit_E20_2_PAN |
4025 ARMMMUIdxBit_E20_0;
4027 if (arm_is_secure_below_el3(env)) {
4028 mask >>= ARM_MMU_IDX_A_NS;
4031 tlb_flush_by_mmuidx(env_cpu(env), mask);
4033 raw_write(env, ri, value);
4036 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4039 ARMCPU *cpu = env_archcpu(env);
4040 CPUState *cs = CPU(cpu);
4042 /*
4043 * A change in VMID to the stage2 page table (Stage2) invalidates
4044 * the combined stage 1 and stage 2 TLBs (EL10_1 and EL10_0).
4045 */
4046 if (raw_read(env, ri) != value) {
4047 uint16_t mask = ARMMMUIdxBit_E10_1 |
4048 ARMMMUIdxBit_E10_1_PAN |
4049 ARMMMUIdxBit_E10_0;
4051 if (arm_is_secure_below_el3(env)) {
4052 mask >>= ARM_MMU_IDX_A_NS;
4055 tlb_flush_by_mmuidx(cs, mask);
4056 raw_write(env, ri, value);
4057 }
4058 }
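/*
 * Note (added): VTTBR_EL2 carries the VMID in its top bits ([55:48], or
 * [63:48] with VMID16) alongside the stage-2 table base. The write
 * handler above is conservative and flushes on any change to the
 * register value rather than comparing the VMID field alone.
 */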
4060 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4061 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4062 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4063 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4064 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4065 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4066 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4067 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4068 offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4069 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4070 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4071 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4072 offsetof(CPUARMState, cp15.dfar_ns) } },
4073 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4074 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4075 .access = PL1_RW, .accessfn = access_tvm_trvm,
4076 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4077 .resetvalue = 0, },
4078 REGINFO_SENTINEL
4079 };
4081 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4082 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4083 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4084 .access = PL1_RW, .accessfn = access_tvm_trvm,
4085 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4086 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4087 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4088 .access = PL1_RW, .accessfn = access_tvm_trvm,
4089 .writefn = vmsa_ttbr_write, .resetvalue = 0,
4090 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4091 offsetof(CPUARMState, cp15.ttbr0_ns) } },
4092 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4093 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4094 .access = PL1_RW, .accessfn = access_tvm_trvm,
4095 .writefn = vmsa_ttbr_write, .resetvalue = 0,
4096 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4097 offsetof(CPUARMState, cp15.ttbr1_ns) } },
4098 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4099 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4100 .access = PL1_RW, .accessfn = access_tvm_trvm,
4101 .writefn = vmsa_tcr_el12_write,
4102 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
4103 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4104 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4105 .access = PL1_RW, .accessfn = access_tvm_trvm,
4106 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4107 .raw_writefn = vmsa_ttbcr_raw_write,
4108 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4109 offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4110 REGINFO_SENTINEL
4111 };
4113 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4114 * qemu tlbs nor adjusting cached masks.
4115 */
4116 static const ARMCPRegInfo ttbcr2_reginfo = {
4117 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4118 .access = PL1_RW, .accessfn = access_tvm_trvm,
4119 .type = ARM_CP_ALIAS,
4120 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4121 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
4122 };
4124 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4127 env->cp15.c15_ticonfig = value & 0xe7;
4128 /* The OS_TYPE bit in this register changes the reported CPUID! */
4129 env->cp15.c0_cpuid = (value & (1 << 5)) ?
4130 ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4133 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4136 env->cp15.c15_threadid = value & 0xffff;
4139 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4142 /* Wait-for-interrupt (deprecated) */
4143 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4146 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4149 /* On OMAP there are registers indicating the max/min index of dcache lines
4150 * containing a dirty line; cache flush operations have to reset these.
4151 */
4152 env->cp15.c15_i_max = 0x000;
4153 env->cp15.c15_i_min = 0xff0;
4156 static const ARMCPRegInfo omap_cp_reginfo[] = {
4157 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4158 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4159 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4161 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4162 .access = PL1_RW, .type = ARM_CP_NOP },
4163 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4165 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4166 .writefn = omap_ticonfig_write },
4167 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4169 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4170 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4171 .access = PL1_RW, .resetvalue = 0xff0,
4172 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4173 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4175 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4176 .writefn = omap_threadid_write },
4177 { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4178 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4179 .type = ARM_CP_NO_RAW,
4180 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4181 /* TODO: Peripheral port remap register:
4182 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4183 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4184 * when MMU is off.
4185 */
4186 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4187 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4188 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4189 .writefn = omap_cachemaint_write },
4190 { .name = "C9", .cp = 15, .crn = 9,
4191 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4192 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4193 REGINFO_SENTINEL
4194 };
4196 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4199 env->cp15.c15_cpar = value & 0x3fff;
4202 static const ARMCPRegInfo xscale_cp_reginfo[] = {
4203 { .name = "XSCALE_CPAR",
4204 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4205 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4206 .writefn = xscale_cpar_write, },
4207 { .name = "XSCALE_AUXCR",
4208 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4209 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4211 /* XScale specific cache-lockdown: since we have no cache we NOP these
4212 * and hope the guest does not really rely on cache behaviour.
4213 */
4214 { .name = "XSCALE_LOCK_ICACHE_LINE",
4215 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4216 .access = PL1_W, .type = ARM_CP_NOP },
4217 { .name = "XSCALE_UNLOCK_ICACHE",
4218 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4219 .access = PL1_W, .type = ARM_CP_NOP },
4220 { .name = "XSCALE_DCACHE_LOCK",
4221 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4222 .access = PL1_RW, .type = ARM_CP_NOP },
4223 { .name = "XSCALE_UNLOCK_DCACHE",
4224 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4225 .access = PL1_W, .type = ARM_CP_NOP },
4226 REGINFO_SENTINEL
4227 };
4229 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4230 /* RAZ/WI the whole crn=15 space, when we don't have a more specific
4231 * implementation of this implementation-defined space.
4232 * Ideally this should eventually disappear in favour of actually
4233 * implementing the correct behaviour for all cores.
4234 */
4235 { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4236 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4238 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4239 .resetvalue = 0 },
4240 REGINFO_SENTINEL
4241 };
4243 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4244 /* Cache status: RAZ because we have no cache so it's always clean */
4245 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4246 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4247 .resetvalue = 0 },
4248 REGINFO_SENTINEL
4249 };
4251 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4252 /* We never have a block transfer operation in progress */
4253 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4254 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4256 /* The cache ops themselves: these all NOP for QEMU */
4257 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4258 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4259 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4260 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4261 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4262 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4263 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4264 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4265 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4266 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4267 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4268 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4269 REGINFO_SENTINEL
4270 };
4272 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4273 /* The cache test-and-clean instructions always return (1 << 30)
4274 * to indicate that there are no dirty cache lines.
4275 */
4276 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4277 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4278 .resetvalue = (1 << 30) },
4279 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4280 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4281 .resetvalue = (1 << 30) },
4282 REGINFO_SENTINEL
4283 };
4285 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4286 /* Ignore ReadBuffer accesses */
4287 { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4288 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4289 .access = PL1_RW, .resetvalue = 0,
4290 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4291 REGINFO_SENTINEL
4292 };
4294 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4296 unsigned int cur_el = arm_current_el(env);
4298 if (arm_is_el2_enabled(env) && cur_el == 1) {
4299 return env->cp15.vpidr_el2;
4301 return raw_read(env, ri);
4304 static uint64_t mpidr_read_val(CPUARMState *env)
4306 ARMCPU *cpu = env_archcpu(env);
4307 uint64_t mpidr = cpu->mp_affinity;
4309 if (arm_feature(env, ARM_FEATURE_V7MP)) {
4310 mpidr |= (1U << 31);
4311 /* Cores which are uniprocessor (non-coherent)
4312 * but still implement the MP extensions set
4313 * bit 30. (For instance, Cortex-R5).
4314 */
4315 if (cpu->mp_is_up) {
4316 mpidr |= (1u << 30);
4317 }
4318 }
4319 return mpidr;
4320 }
4322 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4324 unsigned int cur_el = arm_current_el(env);
4326 if (arm_is_el2_enabled(env) && cur_el == 1) {
4327 return env->cp15.vmpidr_el2;
4329 return mpidr_read_val(env);
4330 }
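/*
 * Encoding note (added): cpu->mp_affinity packs Aff0 into bits [7:0],
 * Aff1 into [15:8], Aff2 into [23:16] and Aff3 into [39:32]; e.g. core 5
 * of cluster 1 reads back as (1 << 8) | 5 = 0x105, with bit 31 (the
 * "MP extensions" flag) ORed in above for ARM_FEATURE_V7MP CPUs.
 */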
4332 static const ARMCPRegInfo lpae_cp_reginfo[] = {
4334 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4335 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4336 .access = PL1_RW, .accessfn = access_tvm_trvm,
4337 .type = ARM_CP_CONST, .resetvalue = 0 },
4338 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4339 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4340 .access = PL1_RW, .accessfn = access_tvm_trvm,
4341 .type = ARM_CP_CONST, .resetvalue = 0 },
4342 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4343 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4344 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4345 offsetof(CPUARMState, cp15.par_ns)} },
4346 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4347 .access = PL1_RW, .accessfn = access_tvm_trvm,
4348 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4349 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4350 offsetof(CPUARMState, cp15.ttbr0_ns) },
4351 .writefn = vmsa_ttbr_write, },
4352 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4353 .access = PL1_RW, .accessfn = access_tvm_trvm,
4354 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4355 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4356 offsetof(CPUARMState, cp15.ttbr1_ns) },
4357 .writefn = vmsa_ttbr_write, },
4358 REGINFO_SENTINEL
4359 };
4361 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4363 return vfp_get_fpcr(env);
4366 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4369 vfp_set_fpcr(env, value);
4372 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4374 return vfp_get_fpsr(env);
4377 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4380 vfp_set_fpsr(env, value);
4383 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4386 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4387 return CP_ACCESS_TRAP;
4389 return CP_ACCESS_OK;
4392 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4395 env->daif = value & PSTATE_DAIF;
4398 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4400 return env->pstate & PSTATE_PAN;
4403 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4406 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4409 static const ARMCPRegInfo pan_reginfo = {
4410 .name = "PAN", .state = ARM_CP_STATE_AA64,
4411 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4412 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4413 .readfn = aa64_pan_read, .writefn = aa64_pan_write
4416 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4418 return env->pstate & PSTATE_UAO;
4421 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4424 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4427 static const ARMCPRegInfo uao_reginfo = {
4428 .name = "UAO", .state = ARM_CP_STATE_AA64,
4429 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4430 .type = ARM_CP_NO_RAW, .access = PL1_RW,
4431 .readfn = aa64_uao_read, .writefn = aa64_uao_write
4434 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4436 return env->pstate & PSTATE_DIT;
4439 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4442 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4445 static const ARMCPRegInfo dit_reginfo = {
4446 .name = "DIT", .state = ARM_CP_STATE_AA64,
4447 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4448 .type = ARM_CP_NO_RAW, .access = PL0_RW,
4449 .readfn = aa64_dit_read, .writefn = aa64_dit_write
4452 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4453 const ARMCPRegInfo *ri,
4456 /* Cache invalidate/clean to Point of Coherency or Persistence... */
4457 switch (arm_current_el(env)) {
4458 case 0:
4459 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4460 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4461 return CP_ACCESS_TRAP;
4462 }
4463 /* fall through */
4464 case 1:
4465 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */
4466 if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4467 return CP_ACCESS_TRAP_EL2;
4468 }
4469 break;
4470 }
4471 return CP_ACCESS_OK;
4474 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
4475 const ARMCPRegInfo *ri,
4478 /* Cache invalidate/clean to Point of Unification... */
4479 switch (arm_current_el(env)) {
4480 case 0:
4481 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */
4482 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4483 return CP_ACCESS_TRAP;
4484 }
4485 /* fall through */
4486 case 1:
4487 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */
4488 if (arm_hcr_el2_eff(env) & HCR_TPU) {
4489 return CP_ACCESS_TRAP_EL2;
4490 }
4491 break;
4492 }
4493 return CP_ACCESS_OK;
4496 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4497 * Page D4-1736 (DDI0487A.b)
4498 */
4500 static int vae1_tlbmask(CPUARMState *env)
4502 uint64_t hcr = arm_hcr_el2_eff(env);
4503 uint16_t mask;
4505 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4506 mask = ARMMMUIdxBit_E20_2 |
4507 ARMMMUIdxBit_E20_2_PAN |
4508 ARMMMUIdxBit_E20_0;
4509 } else {
4510 mask = ARMMMUIdxBit_E10_1 |
4511 ARMMMUIdxBit_E10_1_PAN |
4512 ARMMMUIdxBit_E10_0;
4513 }
4515 if (arm_is_secure_below_el3(env)) {
4516 mask >>= ARM_MMU_IDX_A_NS;
4517 }
4519 return mask;
4520 }
4522 /* Return 56 if TBI is enabled, 64 otherwise. */
4523 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4526 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
4527 int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4528 int select = extract64(addr, 55, 1);
4530 return (tbi >> select) & 1 ? 56 : 64;
4531 }
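/*
 * Worked example (added): aa64_va_parameter_tbi() returns a 2-bit field,
 * bit 0 for the low (TTBR0) half of the address space and bit 1 for the
 * high (TTBR1) half, selected here by VA bit 55. With TBI enabled only
 * for TTBR0, a user address with bit 55 clear matches on 56 bits (the
 * tag byte is ignored), while a kernel address with bit 55 set still
 * matches on all 64 bits.
 */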
4533 static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4535 uint64_t hcr = arm_hcr_el2_eff(env);
4536 ARMMMUIdx mmu_idx;
4538 /* Only the regime of the mmu_idx below is significant. */
4539 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4540 mmu_idx = ARMMMUIdx_E20_0;
4541 } else {
4542 mmu_idx = ARMMMUIdx_E10_0;
4543 }
4545 if (arm_is_secure_below_el3(env)) {
4546 mmu_idx &= ~ARM_MMU_IDX_A_NS;
4547 }
4549 return tlbbits_for_regime(env, mmu_idx, addr);
4550 }
4552 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4555 CPUState *cs = env_cpu(env);
4556 int mask = vae1_tlbmask(env);
4558 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4561 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4564 CPUState *cs = env_cpu(env);
4565 int mask = vae1_tlbmask(env);
4567 if (tlb_force_broadcast(env)) {
4568 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4569 } else {
4570 tlb_flush_by_mmuidx(cs, mask);
4571 }
4572 }
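/*
 * Note (added): tlb_force_broadcast() reflects HCR_EL2.FB, which forces
 * EL1 TLB maintenance to behave as the broadcast (Inner Shareable) form;
 * that is why the non-IS instruction above may still take the
 * all-CPUs-synced path.
 */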
4574 static int alle1_tlbmask(CPUARMState *env)
4575 {
4576 /*
4577 * Note that the 'ALL' scope must invalidate both stage 1 and
4578 * stage 2 translations, whereas most other scopes only invalidate
4579 * stage 1 translations.
4580 */
4581 if (arm_is_secure_below_el3(env)) {
4582 return ARMMMUIdxBit_SE10_1 |
4583 ARMMMUIdxBit_SE10_1_PAN |
4584 ARMMMUIdxBit_SE10_0;
4585 } else {
4586 return ARMMMUIdxBit_E10_1 |
4587 ARMMMUIdxBit_E10_1_PAN |
4588 ARMMMUIdxBit_E10_0;
4589 }
4590 }
4592 static int e2_tlbmask(CPUARMState *env)
4594 if (arm_is_secure_below_el3(env)) {
4595 return ARMMMUIdxBit_SE20_0 |
4596 ARMMMUIdxBit_SE20_2 |
4597 ARMMMUIdxBit_SE20_2_PAN |
4598 ARMMMUIdxBit_SE2;
4599 } else {
4600 return ARMMMUIdxBit_E20_0 |
4601 ARMMMUIdxBit_E20_2 |
4602 ARMMMUIdxBit_E20_2_PAN |
4603 ARMMMUIdxBit_E2;
4604 }
4605 }
4607 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4610 CPUState *cs = env_cpu(env);
4611 int mask = alle1_tlbmask(env);
4613 tlb_flush_by_mmuidx(cs, mask);
4616 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4619 CPUState *cs = env_cpu(env);
4620 int mask = e2_tlbmask(env);
4622 tlb_flush_by_mmuidx(cs, mask);
4625 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4628 ARMCPU *cpu = env_archcpu(env);
4629 CPUState *cs = CPU(cpu);
4631 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
4634 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4637 CPUState *cs = env_cpu(env);
4638 int mask = alle1_tlbmask(env);
4640 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4643 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4646 CPUState *cs = env_cpu(env);
4647 int mask = e2_tlbmask(env);
4649 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4652 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4655 CPUState *cs = env_cpu(env);
4657 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
4660 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4663 /* Invalidate by VA, EL2
4664 * Currently handles both VAE2 and VALE2, since we don't support
4665 * flush-last-level-only.
4666 */
4667 CPUState *cs = env_cpu(env);
4668 int mask = e2_tlbmask(env);
4669 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4671 tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4674 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4677 /* Invalidate by VA, EL3
4678 * Currently handles both VAE3 and VALE3, since we don't support
4679 * flush-last-level-only.
4680 */
4681 ARMCPU *cpu = env_archcpu(env);
4682 CPUState *cs = CPU(cpu);
4683 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4685 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
4688 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4691 CPUState *cs = env_cpu(env);
4692 int mask = vae1_tlbmask(env);
4693 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4694 int bits = vae1_tlbbits(env, pageaddr);
4696 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4697 }
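/*
 * Encoding sketch (added): the TLBI VA* payload carries VA[55:12] in its
 * low 44 bits, so "value << 12" rebuilds the page address and
 * sextract64(..., 0, 56) replicates VA bit 55 into the top byte. The
 * 56-bit result is what the "bits" argument above truncates against when
 * TBI is active for the addressed half of the VA space.
 */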
4699 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4702 /* Invalidate by VA, EL1&0 (AArch64 version).
4703 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4704 * since we don't support flush-for-specific-ASID-only or
4705 * flush-last-level-only.
4706 */
4707 CPUState *cs = env_cpu(env);
4708 int mask = vae1_tlbmask(env);
4709 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4710 int bits = vae1_tlbbits(env, pageaddr);
4712 if (tlb_force_broadcast(env)) {
4713 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4714 } else {
4715 tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
4716 }
4717 }
4719 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4722 CPUState *cs = env_cpu(env);
4723 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4724 bool secure = arm_is_secure_below_el3(env);
4725 int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
4726 int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
4727 pageaddr);
4729 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4732 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4735 CPUState *cs = env_cpu(env);
4736 uint64_t pageaddr = sextract64(value << 12, 0, 56);
4737 int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
4739 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4740 ARMMMUIdxBit_SE3, bits);
4743 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4746 int cur_el = arm_current_el(env);
4748 if (cur_el < 2) {
4749 uint64_t hcr = arm_hcr_el2_eff(env);
4751 if (cur_el == 0) {
4752 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4753 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4754 return CP_ACCESS_TRAP_EL2;
4755 }
4756 } else {
4757 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4758 return CP_ACCESS_TRAP;
4759 }
4760 if (hcr & HCR_TDZ) {
4761 return CP_ACCESS_TRAP_EL2;
4762 }
4763 }
4764 } else if (hcr & HCR_TDZ) {
4765 return CP_ACCESS_TRAP_EL2;
4766 }
4767 }
4768 return CP_ACCESS_OK;
4771 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4773 ARMCPU *cpu = env_archcpu(env);
4774 int dzp_bit = 1 << 4;
4776 /* DZP indicates whether DC ZVA access is allowed */
4777 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4778 dzp_bit = 0;
4779 }
4780 return cpu->dcz_blocksize | dzp_bit;
4781 }
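/*
 * Worked example (added): dcz_blocksize is log2 of the DC ZVA block size
 * in 4-byte words, so a 64-byte block is encoded as 4. The register then
 * reads as 0x4 when DC ZVA is permitted and as 0x14 (DZP, bit 4, set)
 * when it is prohibited.
 */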
4783 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4786 if (!(env->pstate & PSTATE_SP)) {
4787 /* Access to SP_EL0 is undefined if it's being used as
4788 * the stack pointer.
4789 */
4790 return CP_ACCESS_TRAP_UNCATEGORIZED;
4792 return CP_ACCESS_OK;
4795 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4797 return env->pstate & PSTATE_SP;
4800 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4802 update_spsel(env, val);
4805 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4808 ARMCPU *cpu = env_archcpu(env);
4810 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4811 /* M bit is RAZ/WI for PMSA with no MPU implemented */
4812 value &= ~SCTLR_M;
4813 }
4815 /* ??? Lots of these bits are not implemented. */
4817 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
4818 if (ri->opc1 == 6) { /* SCTLR_EL3 */
4819 value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
4820 } else {
4821 value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
4822 SCTLR_ATA0 | SCTLR_ATA);
4823 }
4824 }
4826 if (raw_read(env, ri) == value) {
4827 /* Skip the TLB flush if nothing actually changed; Linux likes
4828 * to do a lot of pointless SCTLR writes.
4829 */
4830 return;
4831 }
4833 raw_write(env, ri, value);
4835 /* This may enable/disable the MMU, so do a TLB flush. */
4836 tlb_flush(CPU(cpu));
4838 if (ri->type & ARM_CP_SUPPRESS_TB_END) {
4839 /*
4840 * Normally we would always end the TB on an SCTLR write; see the
4841 * comment in ARMCPRegInfo sctlr initialization below for why Xscale
4842 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4843 * of hflags from the translator, so do it here.
4844 */
4845 arm_rebuild_hflags(env);
4849 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4852 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4853 return CP_ACCESS_TRAP_FP_EL2;
4855 if (env->cp15.cptr_el[3] & CPTR_TFP) {
4856 return CP_ACCESS_TRAP_FP_EL3;
4858 return CP_ACCESS_OK;
4861 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4864 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4867 static const ARMCPRegInfo v8_cp_reginfo[] = {
4868 /* Minimal set of EL0-visible registers. This will need to be expanded
4869 * significantly for system emulation of AArch64 CPUs.
4870 */
4871 { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4872 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4873 .access = PL0_RW, .type = ARM_CP_NZCV },
4874 { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4875 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4876 .type = ARM_CP_NO_RAW,
4877 .access = PL0_RW, .accessfn = aa64_daif_access,
4878 .fieldoffset = offsetof(CPUARMState, daif),
4879 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4880 { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4881 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4882 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4883 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4884 { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4885 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4886 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4887 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4888 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4889 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4890 .access = PL0_R, .type = ARM_CP_NO_RAW,
4891 .readfn = aa64_dczid_read },
4892 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4893 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4894 .access = PL0_W, .type = ARM_CP_DC_ZVA,
4895 #ifndef CONFIG_USER_ONLY
4896 /* Avoid overhead of an access check that always passes in user-mode */
4897 .accessfn = aa64_zva_access,
4898 #endif
4899 },
4900 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4901 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4902 .access = PL1_R, .type = ARM_CP_CURRENTEL },
4903 /* Cache ops: all NOPs since we don't emulate caches */
4904 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4905 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4906 .access = PL1_W, .type = ARM_CP_NOP,
4907 .accessfn = aa64_cacheop_pou_access },
4908 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4909 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4910 .access = PL1_W, .type = ARM_CP_NOP,
4911 .accessfn = aa64_cacheop_pou_access },
4912 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4913 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4914 .access = PL0_W, .type = ARM_CP_NOP,
4915 .accessfn = aa64_cacheop_pou_access },
4916 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4917 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4918 .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
4919 .type = ARM_CP_NOP },
4920 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4921 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4922 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4923 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4924 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4925 .access = PL0_W, .type = ARM_CP_NOP,
4926 .accessfn = aa64_cacheop_poc_access },
4927 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4928 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4929 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4930 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4931 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4932 .access = PL0_W, .type = ARM_CP_NOP,
4933 .accessfn = aa64_cacheop_pou_access },
4934 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4935 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4936 .access = PL0_W, .type = ARM_CP_NOP,
4937 .accessfn = aa64_cacheop_poc_access },
4938 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4939 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4940 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4941 /* TLBI operations */
4942 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4943 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4944 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4945 .writefn = tlbi_aa64_vmalle1is_write },
4946 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4947 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4948 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4949 .writefn = tlbi_aa64_vae1is_write },
4950 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4951 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4952 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4953 .writefn = tlbi_aa64_vmalle1is_write },
4954 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4955 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4956 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4957 .writefn = tlbi_aa64_vae1is_write },
4958 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4959 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4960 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4961 .writefn = tlbi_aa64_vae1is_write },
4962 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4963 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4964 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4965 .writefn = tlbi_aa64_vae1is_write },
4966 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4967 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4968 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4969 .writefn = tlbi_aa64_vmalle1_write },
4970 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4971 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4972 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4973 .writefn = tlbi_aa64_vae1_write },
4974 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4975 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4976 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4977 .writefn = tlbi_aa64_vmalle1_write },
4978 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4979 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4980 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4981 .writefn = tlbi_aa64_vae1_write },
4982 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4983 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4984 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4985 .writefn = tlbi_aa64_vae1_write },
4986 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4987 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4988 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4989 .writefn = tlbi_aa64_vae1_write },
4990 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4991 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4992 .access = PL2_W, .type = ARM_CP_NOP },
4993 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4994 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4995 .access = PL2_W, .type = ARM_CP_NOP },
4996 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4997 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4998 .access = PL2_W, .type = ARM_CP_NO_RAW,
4999 .writefn = tlbi_aa64_alle1is_write },
5000 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
5001 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
5002 .access = PL2_W, .type = ARM_CP_NO_RAW,
5003 .writefn = tlbi_aa64_alle1is_write },
5004 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
5005 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5006 .access = PL2_W, .type = ARM_CP_NOP },
5007 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
5008 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5009 .access = PL2_W, .type = ARM_CP_NOP },
5010 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
5011 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5012 .access = PL2_W, .type = ARM_CP_NO_RAW,
5013 .writefn = tlbi_aa64_alle1_write },
5014 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
5015 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
5016 .access = PL2_W, .type = ARM_CP_NO_RAW,
5017 .writefn = tlbi_aa64_alle1is_write },
5018 #ifndef CONFIG_USER_ONLY
5019 /* 64 bit address translation operations */
5020 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
5021 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
5022 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5023 .writefn = ats_write64 },
5024 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
5025 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
5026 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5027 .writefn = ats_write64 },
5028 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
5029 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
5030 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5031 .writefn = ats_write64 },
5032 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
5033 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
5034 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5035 .writefn = ats_write64 },
5036 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
5037 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
5038 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5039 .writefn = ats_write64 },
5040 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
5041 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
5042 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5043 .writefn = ats_write64 },
5044 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
5045 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
5046 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5047 .writefn = ats_write64 },
5048 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
5049 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
5050 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5051 .writefn = ats_write64 },
5052 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
5053 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
5054 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
5055 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5056 .writefn = ats_write64 },
5057 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
5058 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
5059 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5060 .writefn = ats_write64 },
5061 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
5062 .type = ARM_CP_ALIAS,
5063 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
5064 .access = PL1_RW, .resetvalue = 0,
5065 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
5066 .writefn = par_write },
5068 /* TLB invalidate last level of translation table walk */
5069 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5070 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5071 .writefn = tlbimva_is_write },
5072 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5073 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5074 .writefn = tlbimvaa_is_write },
5075 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5076 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5077 .writefn = tlbimva_write },
5078 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5079 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5080 .writefn = tlbimvaa_write },
5081 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5082 .type = ARM_CP_NO_RAW, .access = PL2_W,
5083 .writefn = tlbimva_hyp_write },
5084 { .name = "TLBIMVALHIS",
5085 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5086 .type = ARM_CP_NO_RAW, .access = PL2_W,
5087 .writefn = tlbimva_hyp_is_write },
5088 { .name = "TLBIIPAS2",
5089 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5090 .type = ARM_CP_NOP, .access = PL2_W },
5091 { .name = "TLBIIPAS2IS",
5092 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5093 .type = ARM_CP_NOP, .access = PL2_W },
5094 { .name = "TLBIIPAS2L",
5095 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5096 .type = ARM_CP_NOP, .access = PL2_W },
5097 { .name = "TLBIIPAS2LIS",
5098 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5099 .type = ARM_CP_NOP, .access = PL2_W },
5100 /* 32 bit cache operations */
5101 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5102 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5103 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5104 .type = ARM_CP_NOP, .access = PL1_W },
5105 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5106 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5107 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5108 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5109 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5110 .type = ARM_CP_NOP, .access = PL1_W },
5111 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5112 .type = ARM_CP_NOP, .access = PL1_W },
5113 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5114 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5115 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5116 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5117 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5118 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5119 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5120 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5121 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5122 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5123 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5124 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5125 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5126 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5127 /* MMU Domain access control / MPU write buffer control */
5128 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5129 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5130 .writefn = dacr_write, .raw_writefn = raw_write,
5131 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5132 offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5133 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5134 .type = ARM_CP_ALIAS,
5135 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5137 .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5138 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5139 .type = ARM_CP_ALIAS,
5140 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5142 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5143 /* We rely on the access checks not allowing the guest to write to the
5144 * state field when SPSel indicates that it's being used as the stack
5145 * pointer.
5146 */
5147 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5148 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5149 .access = PL1_RW, .accessfn = sp_el0_access,
5150 .type = ARM_CP_ALIAS,
5151 .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5152 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5153 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5154 .access = PL2_RW, .type = ARM_CP_ALIAS,
5155 .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5156 { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5157 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5158 .type = ARM_CP_NO_RAW,
5159 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5160 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5161 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5162 .type = ARM_CP_ALIAS,
5163 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
5164 .access = PL2_RW, .accessfn = fpexc32_access },
5165 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5166 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5167 .access = PL2_RW, .resetvalue = 0,
5168 .writefn = dacr_write, .raw_writefn = raw_write,
5169 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5170 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5171 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5172 .access = PL2_RW, .resetvalue = 0,
5173 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5174 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5175 .type = ARM_CP_ALIAS,
5176 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5178 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5179 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5180 .type = ARM_CP_ALIAS,
5181 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5183 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5184 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5185 .type = ARM_CP_ALIAS,
5186 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5188 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5189 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5190 .type = ARM_CP_ALIAS,
5191 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5193 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5194 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5195 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5197 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5198 { .name = "SDCR", .type = ARM_CP_ALIAS,
5199 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5200 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5201 .writefn = sdcr_write,
5202 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5203 REGINFO_SENTINEL
5204 };
5206 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
5207 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
5208 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5209 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5211 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
5212 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
5213 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5215 .type = ARM_CP_CONST, .resetvalue = 0 },
5216 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5217 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5218 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5219 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5220 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5222 .type = ARM_CP_CONST, .resetvalue = 0 },
5223 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5224 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5225 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5226 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5227 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5228 .access = PL2_RW, .type = ARM_CP_CONST,
5230 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5231 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5232 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5233 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5234 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5235 .access = PL2_RW, .type = ARM_CP_CONST,
5237 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5238 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5239 .access = PL2_RW, .type = ARM_CP_CONST,
5241 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5242 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5243 .access = PL2_RW, .type = ARM_CP_CONST,
5245 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5246 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5247 .access = PL2_RW, .type = ARM_CP_CONST,
5249 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5250 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5251 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5252 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
5253 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5254 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5255 .type = ARM_CP_CONST, .resetvalue = 0 },
5256 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5257 .cp = 15, .opc1 = 6, .crm = 2,
5258 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5259 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
5260 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5261 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5262 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5263 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5264 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5265 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5266 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5267 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5268 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5269 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5270 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5271 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5272 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5273 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5275 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5276 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5277 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5278 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5279 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5280 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5281 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5282 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5284 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5285 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5286 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5287 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5288 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5290 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5291 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5292 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5293 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5294 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5295 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5296 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5297 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5298 .access = PL2_RW, .accessfn = access_tda,
5299 .type = ARM_CP_CONST, .resetvalue = 0 },
5300 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
5301 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5302 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5303 .type = ARM_CP_CONST, .resetvalue = 0 },
5304 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5305 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5306 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5307 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5308 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5309 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5310 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5311 .type = ARM_CP_CONST,
5312 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5313 .access = PL2_RW, .resetvalue = 0 },
5314 REGINFO_SENTINEL
5315 };
5317 /* Ditto, but for registers which exist in ARMv8 but not v7 */
5318 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
5319 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5320 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5322 .type = ARM_CP_CONST, .resetvalue = 0 },
5323 REGINFO_SENTINEL
5324 };
5326 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5328 ARMCPU *cpu = env_archcpu(env);
5330 if (arm_feature(env, ARM_FEATURE_V8)) {
5331 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */
5332 } else {
5333 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */
5334 }
5336 if (arm_feature(env, ARM_FEATURE_EL3)) {
5337 valid_mask &= ~HCR_HCD;
5338 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5339 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5340 * However, if we're using the SMC PSCI conduit then QEMU is
5341 * effectively acting like EL3 firmware and so the guest at
5342 * EL2 should retain the ability to prevent EL1 from being
5343 * able to make SMC calls into the ersatz firmware, so in
5344 * that case HCR.TSC should be read/write.
5345 */
5346 valid_mask &= ~HCR_TSC;
5349 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5350 if (cpu_isar_feature(aa64_vh, cpu)) {
5351 valid_mask |= HCR_E2H;
5353 if (cpu_isar_feature(aa64_lor, cpu)) {
5354 valid_mask |= HCR_TLOR;
5356 if (cpu_isar_feature(aa64_pauth, cpu)) {
5357 valid_mask |= HCR_API | HCR_APK;
5359 if (cpu_isar_feature(aa64_mte, cpu)) {
5360 valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5364 /* Clear RES0 bits. */
5365 value &= valid_mask;
5367 /*
5368 * These bits change the MMU setup:
5369 * HCR_VM enables stage 2 translation
5370 * HCR_PTW forbids certain page-table setups
5371 * HCR_DC disables stage1 and enables stage2 translation
5372 * HCR_DCT enables tagging on (disabled) stage1 translation
5373 */
5374 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
5375 tlb_flush(CPU(cpu));
5377 env->cp15.hcr_el2 = value;
5379 /*
5380 * Updates to VI and VF require us to update the status of
5381 * virtual interrupts, which are the logical OR of these bits
5382 * and the state of the input lines from the GIC. (This requires
5383 * that we have the iothread lock, which is done by marking the
5384 * reginfo structs as ARM_CP_IO.)
5385 * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5386 * possible for it to be taken immediately, because VIRQ and
5387 * VFIQ are masked unless running at EL0 or EL1, and HCR
5388 * can only be written at EL2.
5389 */
5390 g_assert(qemu_mutex_iothread_locked());
5391 arm_cpu_update_virq(cpu);
5392 arm_cpu_update_vfiq(cpu);
5395 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5397 do_hcr_write(env, value, 0);
5400 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5403 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5404 value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5405 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5408 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5411 /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5412 value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5413 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
5414 }
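/*
 * Sketch (added): the AArch32 view of HCR_EL2 is two 32-bit halves.
 * Each half-write deposits its 32 bits into the cached 64-bit value and
 * passes the *other* half's mask as extra valid bits, so the untouched
 * half survives the RES0 clearing in do_hcr_write().
 */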
5416 /*
5417 * Return the effective value of HCR_EL2.
5418 * Bits that are not included here:
5419 * RW (read from SCR_EL3.RW as needed)
5420 */
5421 uint64_t arm_hcr_el2_eff(CPUARMState *env)
5423 uint64_t ret = env->cp15.hcr_el2;
5425 if (!arm_is_el2_enabled(env)) {
5426 /*
5427 * "This register has no effect if EL2 is not enabled in the
5428 * current Security state". This is ARMv8.4-SecEL2 speak for
5429 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5430 *
5431 * Prior to that, the language was "In an implementation that
5432 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5433 * as if this field is 0 for all purposes other than a direct
5434 * read or write access of HCR_EL2". With lots of enumeration
5435 * on a per-field basis. In current QEMU, this condition
5436 * is arm_is_secure_below_el3.
5437 *
5438 * Since the v8.4 language applies to the entire register, and
5439 * appears to be backward compatible, use that.
5440 */
5441 return 0;
5442 }
5444 /*
5445 * For a cpu that supports both aarch64 and aarch32, we can set bits
5446 * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5447 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5448 */
5449 if (!arm_el_is_aa64(env, 2)) {
5450 uint64_t aa32_valid;
5452 /*
5453 * These bits are up-to-date as of ARMv8.6.
5454 * For HCR, it's easiest to list just the 2 bits that are invalid.
5455 * For HCR2, list those that are valid.
5456 */
5457 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5458 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5459 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5460 ret &= aa32_valid;
5461 }
5463 if (ret & HCR_TGE) {
5464 /* These bits are up-to-date as of ARMv8.6. */
5465 if (ret & HCR_E2H) {
5466 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5467 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5468 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5469 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
5470 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
5471 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
5472 } else {
5473 ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5475 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5476 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5477 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5478 HCR_TLOR);
5479 }
5480 }
5482 return ret;
5483 }
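/*
 * Behaviour sketch (added): with TGE = 1 and E2H = 0 the effective value
 * reports FMO/IMO/AMO as 1 (physical interrupts target EL2) and squashes
 * EL1 trap bits such as TVM and TSW; with TGE = 1 and E2H = 1 the listed
 * bits are cleared instead, since in the E2&0 regime the corresponding
 * EL1 controls simply do not apply.
 */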
5484 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5487 /*
5488 * For A-profile AArch32 EL3, if NSACR.CP10
5489 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5490 */
5491 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5492 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5493 value &= ~(0x3 << 10);
5494 value |= env->cp15.cptr_el[2] & (0x3 << 10);
5496 env->cp15.cptr_el[2] = value;
5499 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5501 /*
5502 * For A-profile AArch32 EL3, if NSACR.CP10
5503 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5504 */
5505 uint64_t value = env->cp15.cptr_el[2];
5507 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5508 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5509 value |= 0x3 << 10;
5510 }
5511 return value;
5512 }
5514 static const ARMCPRegInfo el2_cp_reginfo[] = {
5515 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5517 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5518 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5519 .writefn = hcr_write },
5520 { .name = "HCR", .state = ARM_CP_STATE_AA32,
5521 .type = ARM_CP_ALIAS | ARM_CP_IO,
5522 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5523 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5524 .writefn = hcr_writelow },
5525 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5526 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5527 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5528 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5529 .type = ARM_CP_ALIAS,
5530 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5532 .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5533 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5534 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5535 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5536 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5537 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5538 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5539 { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5540 .type = ARM_CP_ALIAS,
5541 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5542 .access = PL2_RW,
5543 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5544 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5545 .type = ARM_CP_ALIAS,
5546 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5547 .access = PL2_RW,
5548 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5549 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5550 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5551 .access = PL2_RW, .writefn = vbar_write,
5552 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5553 .resetvalue = 0 },
5554 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5555 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5556 .access = PL3_RW, .type = ARM_CP_ALIAS,
5557 .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5558 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5559 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5560 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5561 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5562 .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5563 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5564 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5565 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
5566 .resetvalue = 0 },
5567 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5568 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5569 .access = PL2_RW, .type = ARM_CP_ALIAS,
5570 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
5571 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5572 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5573 .access = PL2_RW, .type = ARM_CP_CONST,
5574 .resetvalue = 0 },
5575 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
5576 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5577 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5578 .access = PL2_RW, .type = ARM_CP_CONST,
5579 .resetvalue = 0 },
5580 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5581 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5582 .access = PL2_RW, .type = ARM_CP_CONST,
5583 .resetvalue = 0 },
5584 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5585 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5586 .access = PL2_RW, .type = ARM_CP_CONST,
5587 .resetvalue = 0 },
5588 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5589 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5590 .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
5591 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
5592 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
5593 { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5594 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5595 .type = ARM_CP_ALIAS,
5596 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5597 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5598 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5599 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5600 .access = PL2_RW,
5601 /* no .writefn needed as this can't cause an ASID change;
5602 * no .raw_writefn or .resetfn needed as we never use mask/base_mask
5603 */
5604 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5605 { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5606 .cp = 15, .opc1 = 6, .crm = 2,
5607 .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5608 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5609 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
5610 .writefn = vttbr_write },
5611 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5612 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5613 .access = PL2_RW, .writefn = vttbr_write,
5614 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
5615 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5616 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5617 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
5618 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
5619 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5620 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5621 .access = PL2_RW, .resetvalue = 0,
5622 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
5623 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5624 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5625 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
5626 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5627 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5628 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5629 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
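/*
 * Entries such as HTTBR above carry ARM_CP_64BIT and specify only .crm
 * (no .opc2): they correspond to the 64-bit AArch32 accessors
 * MCRR/MRRC rather than the 32-bit MCR/MRC encodings.
 */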
5630 { .name = "TLBIALLNSNH",
5631 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5632 .type = ARM_CP_NO_RAW, .access = PL2_W,
5633 .writefn = tlbiall_nsnh_write },
5634 { .name = "TLBIALLNSNHIS",
5635 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5636 .type = ARM_CP_NO_RAW, .access = PL2_W,
5637 .writefn = tlbiall_nsnh_is_write },
5638 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5639 .type = ARM_CP_NO_RAW, .access = PL2_W,
5640 .writefn = tlbiall_hyp_write },
5641 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5642 .type = ARM_CP_NO_RAW, .access = PL2_W,
5643 .writefn = tlbiall_hyp_is_write },
5644 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5645 .type = ARM_CP_NO_RAW, .access = PL2_W,
5646 .writefn = tlbimva_hyp_write },
5647 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5648 .type = ARM_CP_NO_RAW, .access = PL2_W,
5649 .writefn = tlbimva_hyp_is_write },
5650 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
5651 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5652 .type = ARM_CP_NO_RAW, .access = PL2_W,
5653 .writefn = tlbi_aa64_alle2_write },
5654 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
5655 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5656 .type = ARM_CP_NO_RAW, .access = PL2_W,
5657 .writefn = tlbi_aa64_vae2_write },
5658 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
5659 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5660 .access = PL2_W, .type = ARM_CP_NO_RAW,
5661 .writefn = tlbi_aa64_vae2_write },
5662 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
5663 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5664 .access = PL2_W, .type = ARM_CP_NO_RAW,
5665 .writefn = tlbi_aa64_alle2is_write },
5666 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
5667 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5668 .type = ARM_CP_NO_RAW, .access = PL2_W,
5669 .writefn = tlbi_aa64_vae2is_write },
5670 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
5671 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5672 .access = PL2_W, .type = ARM_CP_NO_RAW,
5673 .writefn = tlbi_aa64_vae2is_write },
5674 #ifndef CONFIG_USER_ONLY
5675 /* Unlike the other EL2-related AT operations, these must
5676 * UNDEF from EL3 if EL2 is not implemented, which is why we
5677 * define them here rather than with the rest of the AT ops.
5678 */
5679 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5680 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5681 .access = PL2_W, .accessfn = at_s1e2_access,
5682 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5683 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5684 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5685 .access = PL2_W, .accessfn = at_s1e2_access,
5686 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5687 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5688 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5689 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5690 * to behave as if SCR.NS was 1.
5691 */
5692 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5693 .access = PL2_W,
5694 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5695 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5696 .access = PL2_W,
5697 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5698 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5699 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5700 /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
5701 * reset values as IMPDEF. We choose to reset to 3 to comply with
5702 * both ARMv7 and ARMv8.
5703 */
5704 .access = PL2_RW, .resetvalue = 3,
5705 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
5706 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5707 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5708 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
5709 .writefn = gt_cntvoff_write,
5710 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5711 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5712 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
5713 .writefn = gt_cntvoff_write,
5714 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5715 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5716 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5717 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5718 .type = ARM_CP_IO, .access = PL2_RW,
5719 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5720 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5721 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5722 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
5723 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5724 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5725 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5726 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5727 .resetfn = gt_hyp_timer_reset,
5728 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
5729 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5730 .type = ARM_CP_IO,
5731 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5732 .access = PL2_RW,
5733 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
5734 .resetvalue = 0,
5735 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
5737 /* The only field of MDCR_EL2 that has a defined architectural reset value
5738 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
5739 * don't implement any PMU event counters, so using zero as a reset
5740 * value for MDCR_EL2 is okay.
5741 */
5742 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5743 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5744 .access = PL2_RW, .resetvalue = 0,
5745 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
5746 { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5747 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5748 .access = PL2_RW, .accessfn = access_el3_aa32ns,
5749 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5750 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5751 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5752 .access = PL2_RW,
5753 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5754 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5755 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5756 .access = PL2_RW,
5757 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
5758 REGINFO_SENTINEL
5759 };
5761 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
5762 { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5763 .type = ARM_CP_ALIAS | ARM_CP_IO,
5764 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5765 .access = PL2_RW,
5766 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
5767 .writefn = hcr_writehigh },
5768 REGINFO_SENTINEL
5769 };
5771 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
5772 bool isread)
5773 {
5774 if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
5775 return CP_ACCESS_OK;
5776 }
5777 return CP_ACCESS_TRAP_UNCATEGORIZED;
5778 }
5780 static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
5781 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
5782 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
5783 .access = PL2_RW, .accessfn = sel2_access,
5784 .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
5785 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
5786 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
5787 .access = PL2_RW, .accessfn = sel2_access,
5788 .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
5789 REGINFO_SENTINEL
5790 };
5792 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
5793 bool isread)
5794 {
5795 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5796 * At Secure EL1 it traps to EL3 or EL2.
5797 */
5798 if (arm_current_el(env) == 3) {
5799 return CP_ACCESS_OK;
5800 }
5801 if (arm_is_secure_below_el3(env)) {
5802 if (env->cp15.scr_el3 & SCR_EEL2) {
5803 return CP_ACCESS_TRAP_EL2;
5804 }
5805 return CP_ACCESS_TRAP_EL3;
5806 }
5807 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
5808 if (isread) {
5809 return CP_ACCESS_OK;
5810 }
5811 return CP_ACCESS_TRAP_UNCATEGORIZED;
5812 }
5814 static const ARMCPRegInfo el3_cp_reginfo[] = {
5815 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5816 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5817 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5818 .resetfn = scr_reset, .writefn = scr_write },
5819 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5820 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5821 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5822 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5823 .writefn = scr_write },
5824 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5825 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5826 .access = PL3_RW, .resetvalue = 0,
5827 .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5828 { .name = "SDER",
5829 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5830 .access = PL3_RW, .resetvalue = 0,
5831 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5832 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5833 .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5834 .writefn = vbar_write, .resetvalue = 0,
5835 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5836 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5837 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5838 .access = PL3_RW, .resetvalue = 0,
5839 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5840 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5841 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5842 .access = PL3_RW,
5843 /* no .writefn needed as this can't cause an ASID change;
5844 * we must provide a .raw_writefn and .resetfn because we handle
5845 * reset and migration for the AArch32 TTBCR(S), which might be
5846 * using mask and base_mask.
5847 */
5848 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
5849 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5850 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5851 .type = ARM_CP_ALIAS,
5852 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5853 .access = PL3_RW,
5854 .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5855 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5856 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5857 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5858 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5859 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5860 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5861 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5862 .type = ARM_CP_ALIAS,
5863 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5864 .access = PL3_RW,
5865 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5866 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5867 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5868 .access = PL3_RW, .writefn = vbar_write,
5869 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5870 .resetvalue = 0 },
5871 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5872 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5873 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5874 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5875 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5876 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5877 .access = PL3_RW, .resetvalue = 0,
5878 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5879 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5880 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5881 .access = PL3_RW, .type = ARM_CP_CONST,
5882 .resetvalue = 0 },
5883 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5884 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5885 .access = PL3_RW, .type = ARM_CP_CONST,
5886 .resetvalue = 0 },
5887 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5888 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5889 .access = PL3_RW, .type = ARM_CP_CONST,
5890 .resetvalue = 0 },
5891 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5892 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5893 .access = PL3_W, .type = ARM_CP_NO_RAW,
5894 .writefn = tlbi_aa64_alle3is_write },
5895 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5896 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5897 .access = PL3_W, .type = ARM_CP_NO_RAW,
5898 .writefn = tlbi_aa64_vae3is_write },
5899 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5900 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5901 .access = PL3_W, .type = ARM_CP_NO_RAW,
5902 .writefn = tlbi_aa64_vae3is_write },
5903 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5904 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5905 .access = PL3_W, .type = ARM_CP_NO_RAW,
5906 .writefn = tlbi_aa64_alle3_write },
5907 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5908 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5909 .access = PL3_W, .type = ARM_CP_NO_RAW,
5910 .writefn = tlbi_aa64_vae3_write },
5911 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5912 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5913 .access = PL3_W, .type = ARM_CP_NO_RAW,
5914 .writefn = tlbi_aa64_vae3_write },
5915 REGINFO_SENTINEL
5916 };
5918 #ifndef CONFIG_USER_ONLY
5919 /* Test if system register redirection is to occur in the current state. */
5920 static bool redirect_for_e2h(CPUARMState *env)
5921 {
5922 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
5923 }
5925 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
5926 {
5927 CPReadFn *readfn;
5929 if (redirect_for_e2h(env)) {
5930 /* Switch to the saved EL2 version of the register. */
5931 ri = ri->opaque;
5932 readfn = ri->readfn;
5933 } else {
5934 readfn = ri->orig_readfn;
5935 }
5936 if (readfn == NULL) {
5937 readfn = raw_read;
5938 }
5939 return readfn(env, ri);
5940 }
5942 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
5943 uint64_t value)
5944 {
5945 CPWriteFn *writefn;
5947 if (redirect_for_e2h(env)) {
5948 /* Switch to the saved EL2 version of the register. */
5949 ri = ri->opaque;
5950 writefn = ri->writefn;
5951 } else {
5952 writefn = ri->orig_writefn;
5953 }
5954 if (writefn == NULL) {
5955 writefn = raw_write;
5956 }
5957 writefn(env, ri, value);
5958 }
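/*
 * Worked example of the redirection pair above: at EL2 with
 * HCR_EL2.E2H = 1, an access to SCTLR_EL1 follows ri->opaque to the
 * SCTLR_EL2 state, while the SCTLR_EL12 alias created by
 * define_arm_vh_e2h_redirects_aliases() below still reaches the real
 * EL1 register.
 */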
5960 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5961 {
5962 struct E2HAlias {
5963 uint32_t src_key, dst_key, new_key;
5964 const char *src_name, *dst_name, *new_name;
5965 bool (*feature)(const ARMISARegisters *id);
5966 };
5968 #define K(op0, op1, crn, crm, op2) \
5969 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5971 static const struct E2HAlias aliases[] = {
5972 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
5973 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5974 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
5975 "CPACR", "CPTR_EL2", "CPACR_EL12" },
5976 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
5977 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5978 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
5979 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5980 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
5981 "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5982 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
5983 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5984 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
5985 "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5986 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
5987 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5988 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
5989 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5990 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
5991 "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5992 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
5993 "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5994 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5995 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5996 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5997 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5998 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5999 "VBAR", "VBAR_EL2", "VBAR_EL12" },
6000 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
6001 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
6002 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
6003 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
6005 /*
6006 * Note that redirection of ZCR is mentioned in the description
6007 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
6008 * not in the summary table.
6009 */
6010 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
6011 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
6013 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
6014 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
6016 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6017 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6018 };
6019 #undef K
6021 size_t i;
6023 for (i = 0; i < ARRAY_SIZE(aliases); i++) {
6024 const struct E2HAlias *a = &aliases[i];
6025 ARMCPRegInfo *src_reg, *dst_reg;
6027 if (a->feature && !a->feature(&cpu->isar)) {
6028 continue;
6029 }
6031 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
6032 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
6033 g_assert(src_reg != NULL);
6034 g_assert(dst_reg != NULL);
6036 /* Cross-compare names to detect typos in the keys. */
6037 g_assert(strcmp(src_reg->name, a->src_name) == 0);
6038 g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
6040 /* None of the core system registers use opaque; we will. */
6041 g_assert(src_reg->opaque == NULL);
6043 /* Create alias before redirection so we dup the right data. */
6044 {
6045 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
6046 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
6047 bool ok;
6049 new_reg->name = a->new_name;
6050 new_reg->type |= ARM_CP_ALIAS;
6051 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
6052 new_reg->access &= PL2_RW | PL3_RW;
6054 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
6055 g_assert(ok);
6056 }
6058 src_reg->opaque = dst_reg;
6059 src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6060 src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6061 if (!src_reg->raw_readfn) {
6062 src_reg->raw_readfn = raw_read;
6063 }
6064 if (!src_reg->raw_writefn) {
6065 src_reg->raw_writefn = raw_write;
6066 }
6067 src_reg->readfn = el2_e2h_read;
6068 src_reg->writefn = el2_e2h_write;
6069 }
6070 }
6071 #endif
6073 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6074 bool isread)
6075 {
6076 int cur_el = arm_current_el(env);
6078 if (cur_el < 2) {
6079 uint64_t hcr = arm_hcr_el2_eff(env);
6081 if (cur_el == 0) {
6082 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
6083 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
6084 return CP_ACCESS_TRAP_EL2;
6085 }
6086 } else {
6087 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
6088 return CP_ACCESS_TRAP;
6089 }
6090 if (hcr & HCR_TID2) {
6091 return CP_ACCESS_TRAP_EL2;
6092 }
6093 }
6094 } else if (hcr & HCR_TID2) {
6095 return CP_ACCESS_TRAP_EL2;
6096 }
6097 }
6099 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
6100 return CP_ACCESS_TRAP_EL2;
6101 }
6103 return CP_ACCESS_OK;
6104 }
6106 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
6107 uint64_t value)
6108 {
6109 /* Writes to OSLAR_EL1 may update the OS lock status, which can be
6110 * read via a bit in OSLSR_EL1.
6111 */
6112 int oslock;
6114 if (ri->state == ARM_CP_STATE_AA32) {
6115 oslock = (value == 0xC5ACCE55);
6116 } else {
6117 oslock = (value == 1);
6118 }
6120 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
6121 }
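/*
 * Note on the constants above: 0xC5ACCE55 is the architected CP14
 * software lock key, so AArch32 sets the OS lock only when exactly
 * that key is written; the AArch64 OSLAR_EL1 form instead compares the
 * written value with 1.
 */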
6123 static const ARMCPRegInfo debug_cp_reginfo[] = {
6124 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
6125 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
6126 * unlike DBGDRAR it is never accessible from EL0.
6127 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
6128 * accessor.
6129 */
6130 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
6131 .access = PL0_R, .accessfn = access_tdra,
6132 .type = ARM_CP_CONST, .resetvalue = 0 },
6133 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
6134 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6135 .access = PL1_R, .accessfn = access_tdra,
6136 .type = ARM_CP_CONST, .resetvalue = 0 },
6137 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
6138 .access = PL0_R, .accessfn = access_tdra,
6139 .type = ARM_CP_CONST, .resetvalue = 0 },
6140 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
6141 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
6142 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6143 .access = PL1_RW, .accessfn = access_tda,
6144 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
6145 .resetvalue = 0 },
6146 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
6147 * We don't implement the configurable EL0 access.
6148 */
6149 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
6150 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6151 .type = ARM_CP_ALIAS,
6152 .access = PL1_R, .accessfn = access_tda,
6153 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
6154 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
6155 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
6156 .access = PL1_W, .type = ARM_CP_NO_RAW,
6157 .accessfn = access_tdosa,
6158 .writefn = oslar_write },
6159 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
6160 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
6161 .access = PL1_R, .resetvalue = 10,
6162 .accessfn = access_tdosa,
6163 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
6164 /* Dummy OSDLR_EL1: 32-bit Linux will read this */
6165 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
6166 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
6167 .access = PL1_RW, .accessfn = access_tdosa,
6168 .type = ARM_CP_NOP },
6169 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
6170 * implement vector catch debug events yet.
6171 */
6172 { .name = "DBGVCR",
6173 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6174 .access = PL1_RW, .accessfn = access_tda,
6175 .type = ARM_CP_NOP },
6176 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
6177 * to save and restore a 32-bit guest's DBGVCR)
6178 */
6179 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
6180 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
6181 .access = PL2_RW, .accessfn = access_tda,
6182 .type = ARM_CP_NOP },
6183 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
6184 * Channel but Linux may try to access this register. The 32-bit
6185 * alias is DBGDCCINT.
6186 */
6187 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
6188 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6189 .access = PL1_RW, .accessfn = access_tda,
6190 .type = ARM_CP_NOP },
6191 REGINFO_SENTINEL
6192 };
6194 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
6195 /* 64 bit access versions of the (dummy) debug registers */
6196 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
6197 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6198 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
6199 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6200 REGINFO_SENTINEL
6201 };
6203 /* Return the exception level to which exceptions should be taken
6204 * via SVEAccessTrap. If an exception should be routed through
6205 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
6206 * take care of raising that exception.
6207 * C.f. the ARM pseudocode function CheckSVEEnabled.
6208 */
6209 int sve_exception_el(CPUARMState *env, int el)
6210 {
6211 #ifndef CONFIG_USER_ONLY
6212 uint64_t hcr_el2 = arm_hcr_el2_eff(env);
6214 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
6215 bool disabled = false;
6217 /* The CPACR.ZEN controls traps to EL1:
6218 * 0, 2 : trap EL0 and EL1 accesses
6219 * 1 : trap only EL0 accesses
6220 * 3 : trap no accesses
6221 */
6222 if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
6223 disabled = true;
6224 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
6225 disabled = el == 0;
6226 }
6227 if (disabled) {
6228 /* route_to_el2 */
6229 return hcr_el2 & HCR_TGE ? 2 : 1;
6230 }
6232 /* Check CPACR.FPEN. */
6233 if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
6234 disabled = true;
6235 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
6236 disabled = el == 0;
6237 }
6238 if (disabled) {
6239 return 0;
6240 }
6241 }
6243 /* CPTR_EL2. Since TZ and TFP are positive,
6244 * they will be zero when EL2 is not present.
6245 */
6246 if (el <= 2 && arm_is_el2_enabled(env)) {
6247 if (env->cp15.cptr_el[2] & CPTR_TZ) {
6248 return 2;
6249 }
6250 if (env->cp15.cptr_el[2] & CPTR_TFP) {
6251 return 0;
6252 }
6253 }
6255 /* CPTR_EL3. Since EZ is negative we must check for EL3. */
6256 if (arm_feature(env, ARM_FEATURE_EL3)
6257 && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
6258 return 3;
6259 }
6260 #endif
6261 return 0;
6262 }
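/*
 * Example of the CPACR_EL1.ZEN check above: at EL0 with ZEN == 1
 * (trap EL0 accesses only) and HCR_EL2.TGE clear, "disabled" becomes
 * true and the function returns 1, i.e. the SVE access traps to EL1.
 */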
6264 static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
6265 {
6266 uint32_t end_len;
6268 end_len = start_len &= 0xf;
6269 if (!test_bit(start_len, cpu->sve_vq_map)) {
6270 end_len = find_last_bit(cpu->sve_vq_map, start_len);
6271 assert(end_len < start_len);
6272 }
6273 return end_len;
6274 }
6276 /*
6277 * Given that SVE is enabled, return the vector length for EL.
6278 */
6279 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
6280 {
6281 ARMCPU *cpu = env_archcpu(env);
6282 uint32_t zcr_len = cpu->sve_max_vq - 1;
6284 if (el <= 1) {
6285 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
6286 }
6287 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6288 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
6289 }
6290 if (arm_feature(env, ARM_FEATURE_EL3)) {
6291 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
6292 }
6294 return sve_zcr_get_valid_len(cpu, zcr_len);
6295 }
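/*
 * Example: with cpu->sve_max_vq == 4 and ZCR_EL1.LEN == 7, the result
 * here is MIN(3, 7) == 3, i.e. (3 + 1) * 128 == 512-bit vectors,
 * subject to sve_zcr_get_valid_len() rounding down to a size present
 * in sve_vq_map.
 */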
6297 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6298 uint64_t value)
6299 {
6300 int cur_el = arm_current_el(env);
6301 int old_len = sve_zcr_len_for_el(env, cur_el);
6302 int new_len;
6304 /* Bits other than [3:0] are RAZ/WI. */
6305 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6306 raw_write(env, ri, value & 0xf);
6308 /*
6309 * Because we arrived here, we know both FP and SVE are enabled;
6310 * otherwise we would have trapped access to the ZCR_ELn register.
6311 */
6312 new_len = sve_zcr_len_for_el(env, cur_el);
6313 if (new_len < old_len) {
6314 aarch64_sve_narrow_vq(env, new_len + 1);
6315 }
6316 }
6318 static const ARMCPRegInfo zcr_el1_reginfo = {
6319 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6320 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6321 .access = PL1_RW, .type = ARM_CP_SVE,
6322 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6323 .writefn = zcr_write, .raw_writefn = raw_write
6324 };
6326 static const ARMCPRegInfo zcr_el2_reginfo = {
6327 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6328 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6329 .access = PL2_RW, .type = ARM_CP_SVE,
6330 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6331 .writefn = zcr_write, .raw_writefn = raw_write
6332 };
6334 static const ARMCPRegInfo zcr_no_el2_reginfo = {
6335 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6336 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6337 .access = PL2_RW, .type = ARM_CP_SVE,
6338 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
6339 };
6341 static const ARMCPRegInfo zcr_el3_reginfo = {
6342 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6343 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6344 .access = PL3_RW, .type = ARM_CP_SVE,
6345 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6346 .writefn = zcr_write, .raw_writefn = raw_write
6347 };
6349 void hw_watchpoint_update(ARMCPU *cpu, int n)
6350 {
6351 CPUARMState *env = &cpu->env;
6352 vaddr len = 0;
6353 vaddr wvr = env->cp15.dbgwvr[n];
6354 uint64_t wcr = env->cp15.dbgwcr[n];
6355 int mask;
6356 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
6358 if (env->cpu_watchpoint[n]) {
6359 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
6360 env->cpu_watchpoint[n] = NULL;
6361 }
6363 if (!extract64(wcr, 0, 1)) {
6364 /* E bit clear : watchpoint disabled */
6365 return;
6366 }
6368 switch (extract64(wcr, 3, 2)) {
6369 case 0:
6370 /* LSC 00 is reserved and must behave as if the wp is disabled */
6371 return;
6372 case 1:
6373 flags |= BP_MEM_READ;
6374 break;
6375 case 2:
6376 flags |= BP_MEM_WRITE;
6377 break;
6378 case 3:
6379 flags |= BP_MEM_ACCESS;
6380 break;
6381 }
6383 /* Attempts to use both MASK and BAS fields simultaneously are
6384 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
6385 * thus generating a watchpoint for every byte in the masked region.
6386 */
6387 mask = extract64(wcr, 24, 4);
6388 if (mask == 1 || mask == 2) {
6389 /* Reserved values of MASK; we must act as if the mask value was
6390 * some non-reserved value, or as if the watchpoint were disabled.
6391 * We choose the latter.
6392 */
6393 return;
6394 } else if (mask) {
6395 /* Watchpoint covers an aligned area up to 2GB in size */
6396 len = 1ULL << mask;
6397 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
6398 * whether the watchpoint fires when the unmasked bits match; we opt
6399 * to generate the exceptions.
6400 */
6401 wvr &= ~(len - 1);
6402 } else {
6403 /* Watchpoint covers bytes defined by the byte address select bits */
6404 int bas = extract64(wcr, 5, 8);
6405 int basstart;
6407 if (extract64(wvr, 2, 1)) {
6408 /* Deprecated case of an only 4-aligned address. BAS[7:4] are
6409 * ignored, and BAS[3:0] define which bytes to watch.
6410 */
6411 bas &= 0xf;
6412 }
6414 if (bas == 0) {
6415 /* This must act as if the watchpoint is disabled */
6416 return;
6417 }
6419 /* The BAS bits are supposed to be programmed to indicate a contiguous
6420 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
6421 * we fire for each byte in the word/doubleword addressed by the WVR.
6422 * We choose to ignore any non-zero bits after the first range of 1s.
6423 */
6424 basstart = ctz32(bas);
6425 len = cto32(bas >> basstart);
6426 wvr += basstart;
6427 }
6429 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
6430 &env->cpu_watchpoint[n]);
6431 }
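/*
 * Worked example of the BAS path above: WVR = 0x1000 and BAS =
 * 0b00001100 give basstart == 2 and len == 2, so a two-byte watchpoint
 * is inserted at address 0x1002.
 */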
6433 void hw_watchpoint_update_all(ARMCPU *cpu)
6434 {
6435 int i;
6436 CPUARMState *env = &cpu->env;
6438 /* Completely clear out existing QEMU watchpoints and our array, to
6439 * avoid possible stale entries following migration load.
6440 */
6441 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
6442 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
6444 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
6445 hw_watchpoint_update(cpu, i);
6446 }
6447 }
6449 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6450 uint64_t value)
6451 {
6452 ARMCPU *cpu = env_archcpu(env);
6453 int i = ri->crm;
6455 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
6456 * register reads and behaves as if values written are sign extended.
6457 * Bits [1:0] are RES0.
6458 */
6459 value = sextract64(value, 0, 49) & ~3ULL;
6461 raw_write(env, ri, value);
6462 hw_watchpoint_update(cpu, i);
6463 }
6465 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6466 uint64_t value)
6467 {
6468 ARMCPU *cpu = env_archcpu(env);
6469 int i = ri->crm;
6471 raw_write(env, ri, value);
6472 hw_watchpoint_update(cpu, i);
6473 }
6475 void hw_breakpoint_update(ARMCPU *cpu, int n)
6476 {
6477 CPUARMState *env = &cpu->env;
6478 uint64_t bvr = env->cp15.dbgbvr[n];
6479 uint64_t bcr = env->cp15.dbgbcr[n];
6480 vaddr addr;
6481 int bt;
6482 int flags = BP_CPU;
6484 if (env->cpu_breakpoint[n]) {
6485 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
6486 env->cpu_breakpoint[n] = NULL;
6487 }
6489 if (!extract64(bcr, 0, 1)) {
6490 /* E bit clear : breakpoint disabled */
6491 return;
6492 }
6494 bt = extract64(bcr, 20, 4);
6496 switch (bt) {
6497 case 4: /* unlinked address mismatch (reserved if AArch64) */
6498 case 5: /* linked address mismatch (reserved if AArch64) */
6499 qemu_log_mask(LOG_UNIMP,
6500 "arm: address mismatch breakpoint types not implemented\n");
6501 return;
6502 case 0: /* unlinked address match */
6503 case 1: /* linked address match */
6504 {
6505 /* Bits [63:49] are hardwired to the value of bit [48]; that is,
6506 * we behave as if the register was sign extended. Bits [1:0] are
6507 * RES0. The BAS field is used to allow setting breakpoints on 16
6508 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
6509 * a bp will fire if the addresses covered by the bp and the addresses
6510 * covered by the insn overlap but the insn doesn't start at the
6511 * start of the bp address range. We choose to require the insn and
6512 * the bp to have the same address. The constraints on writing to
6513 * BAS enforced in dbgbcr_write mean we have only four cases:
6514 * 0b0000 => no breakpoint
6515 * 0b0011 => breakpoint on addr
6516 * 0b1100 => breakpoint on addr + 2
6517 * 0b1111 => breakpoint on addr
6518 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
6519 */
6520 int bas = extract64(bcr, 5, 4);
6521 addr = sextract64(bvr, 0, 49) & ~3ULL;
6522 if (bas == 0) {
6523 return;
6524 }
6525 if (bas == 0xc) {
6526 addr += 2;
6527 }
6528 break;
6529 }
6530 case 2: /* unlinked context ID match */
6531 case 8: /* unlinked VMID match (reserved if no EL2) */
6532 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
6533 qemu_log_mask(LOG_UNIMP,
6534 "arm: unlinked context breakpoint types not implemented\n");
6535 return;
6536 case 9: /* linked VMID match (reserved if no EL2) */
6537 case 11: /* linked context ID and VMID match (reserved if no EL2) */
6538 case 3: /* linked context ID match */
6539 default:
6540 /* We must generate no events for Linked context matches (unless
6541 * they are linked to by some other bp/wp, which is handled in
6542 * updates for the linking bp/wp). We choose to also generate no events
6543 * for reserved values.
6544 */
6545 return;
6546 }
6548 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
6549 }
6551 void hw_breakpoint_update_all(ARMCPU *cpu)
6552 {
6553 int i;
6554 CPUARMState *env = &cpu->env;
6556 /* Completely clear out existing QEMU breakpoints and our array, to
6557 * avoid possible stale entries following migration load.
6558 */
6559 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
6560 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
6562 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
6563 hw_breakpoint_update(cpu, i);
6564 }
6565 }
6567 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6568 uint64_t value)
6569 {
6570 ARMCPU *cpu = env_archcpu(env);
6571 int i = ri->crm;
6573 raw_write(env, ri, value);
6574 hw_breakpoint_update(cpu, i);
6575 }
6577 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6578 uint64_t value)
6579 {
6580 ARMCPU *cpu = env_archcpu(env);
6581 int i = ri->crm;
6583 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
6584 * copy of BAS[0].
6585 */
6586 value = deposit64(value, 6, 1, extract64(value, 5, 1));
6587 value = deposit64(value, 8, 1, extract64(value, 7, 1));
6589 raw_write(env, ri, value);
6590 hw_breakpoint_update(cpu, i);
6591 }
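/*
 * Example of the read-only copies above: a guest write of BAS = 0b0101
 * is stored as BAS = 0b1111, since BAS[1] is forced to the value of
 * BAS[0] and BAS[3] to the value of BAS[2].
 */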
6593 static void define_debug_regs(ARMCPU *cpu)
6594 {
6595 /* Define v7 and v8 architectural debug registers.
6596 * These are just dummy implementations for now.
6597 */
6598 int i;
6599 int wrps, brps, ctx_cmps;
6601 /*
6602 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot
6603 * use AArch32. Given that bit 15 is RES1, if the value is 0 then
6604 * the register must not exist for this cpu.
6606 if (cpu->isar.dbgdidr != 0) {
6607 ARMCPRegInfo dbgdidr = {
6608 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0,
6609 .opc1 = 0, .opc2 = 0,
6610 .access = PL0_R, .accessfn = access_tda,
6611 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
6612 };
6613 define_one_arm_cp_reg(cpu, &dbgdidr);
6614 }
6616 /* Note that all these register fields hold "number of Xs minus 1". */
6617 brps = arm_num_brps(cpu);
6618 wrps = arm_num_wrps(cpu);
6619 ctx_cmps = arm_num_ctx_cmps(cpu);
6621 assert(ctx_cmps <= brps);
6623 define_arm_cp_regs(cpu, debug_cp_reginfo);
6625 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
6626 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
6627 }
6629 for (i = 0; i < brps; i++) {
6630 ARMCPRegInfo dbgregs[] = {
6631 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
6632 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
6633 .access = PL1_RW, .accessfn = access_tda,
6634 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
6635 .writefn = dbgbvr_write, .raw_writefn = raw_write
6636 },
6637 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
6638 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
6639 .access = PL1_RW, .accessfn = access_tda,
6640 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
6641 .writefn = dbgbcr_write, .raw_writefn = raw_write
6642 },
6643 REGINFO_SENTINEL
6644 };
6645 define_arm_cp_regs(cpu, dbgregs);
6646 }
6648 for (i = 0; i < wrps; i++) {
6649 ARMCPRegInfo dbgregs[] = {
6650 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
6651 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
6652 .access = PL1_RW, .accessfn = access_tda,
6653 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
6654 .writefn = dbgwvr_write, .raw_writefn = raw_write
6655 },
6656 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
6657 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
6658 .access = PL1_RW, .accessfn = access_tda,
6659 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
6660 .writefn = dbgwcr_write, .raw_writefn = raw_write
6661 },
6662 REGINFO_SENTINEL
6663 };
6664 define_arm_cp_regs(cpu, dbgregs);
6665 }
6666 }
6668 static void define_pmu_regs(ARMCPU *cpu)
6669 {
6670 /*
6671 * v7 performance monitor control register: same implementor
6672 * field as main ID register, and we implement four counters in
6673 * addition to the cycle count register.
6674 */
6675 unsigned int i, pmcrn = 4;
6676 ARMCPRegInfo pmcr = {
6677 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6678 .access = PL0_RW,
6679 .type = ARM_CP_IO | ARM_CP_ALIAS,
6680 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6681 .accessfn = pmreg_access, .writefn = pmcr_write,
6682 .raw_writefn = raw_write,
6683 };
6684 ARMCPRegInfo pmcr64 = {
6685 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6686 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6687 .access = PL0_RW, .accessfn = pmreg_access,
6688 .type = ARM_CP_IO,
6689 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6690 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
6691 PMCRLC,
6692 .writefn = pmcr_write, .raw_writefn = raw_write,
6693 };
6694 define_one_arm_cp_reg(cpu, &pmcr);
6695 define_one_arm_cp_reg(cpu, &pmcr64);
6696 for (i = 0; i < pmcrn; i++) {
6697 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6698 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6699 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6700 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
6701 ARMCPRegInfo pmev_regs[] = {
6702 { .name = pmevcntr_name, .cp = 15, .crn = 14,
6703 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6704 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6705 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6706 .accessfn = pmreg_access },
6707 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6708 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6709 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6710 .type = ARM_CP_IO,
6711 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6712 .raw_readfn = pmevcntr_rawread,
6713 .raw_writefn = pmevcntr_rawwrite },
6714 { .name = pmevtyper_name, .cp = 15, .crn = 14,
6715 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6716 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6717 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6718 .accessfn = pmreg_access },
6719 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6720 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6721 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6722 .type = ARM_CP_IO,
6723 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6724 .raw_writefn = pmevtyper_rawwrite },
6725 REGINFO_SENTINEL
6726 };
6727 define_arm_cp_regs(cpu, pmev_regs);
6728 g_free(pmevcntr_name);
6729 g_free(pmevcntr_el0_name);
6730 g_free(pmevtyper_name);
6731 g_free(pmevtyper_el0_name);
6732 }
6733 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
6734 ARMCPRegInfo v81_pmu_regs[] = {
6735 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6736 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6737 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6738 .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6739 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6740 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6741 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6742 .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6743 REGINFO_SENTINEL
6744 };
6745 define_arm_cp_regs(cpu, v81_pmu_regs);
6746 }
6747 if (cpu_isar_feature(any_pmu_8_4, cpu)) {
6748 static const ARMCPRegInfo v84_pmmir = {
6749 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6750 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6751 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6752 .resetvalue = 0
6753 };
6754 define_one_arm_cp_reg(cpu, &v84_pmmir);
6755 }
6756 }
6758 /* We don't know until after realize whether there's a GICv3
6759 * attached, and that is what registers the gicv3 sysregs.
6760 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
6761 * at runtime.
6762 */
6763 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6764 {
6765 ARMCPU *cpu = env_archcpu(env);
6766 uint64_t pfr1 = cpu->isar.id_pfr1;
6768 if (env->gicv3state) {
6769 pfr1 |= 1 << 28;
6770 }
6771 return pfr1;
6772 }
6774 #ifndef CONFIG_USER_ONLY
6775 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6776 {
6777 ARMCPU *cpu = env_archcpu(env);
6778 uint64_t pfr0 = cpu->isar.id_aa64pfr0;
6780 if (env->gicv3state) {
6781 pfr0 |= 1 << 24;
6782 }
6783 return pfr0;
6784 }
6785 #endif
6787 /* Shared logic between LORID and the rest of the LOR* registers.
6788 * Secure state exclusion has already been dealt with.
6789 */
6790 static CPAccessResult access_lor_ns(CPUARMState *env,
6791 const ARMCPRegInfo *ri, bool isread)
6792 {
6793 int el = arm_current_el(env);
6795 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6796 return CP_ACCESS_TRAP_EL2;
6797 }
6798 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6799 return CP_ACCESS_TRAP_EL3;
6800 }
6801 return CP_ACCESS_OK;
6802 }
6804 static CPAccessResult access_lor_other(CPUARMState *env,
6805 const ARMCPRegInfo *ri, bool isread)
6806 {
6807 if (arm_is_secure_below_el3(env)) {
6808 /* Access denied in secure mode. */
6809 return CP_ACCESS_TRAP;
6810 }
6811 return access_lor_ns(env, ri, isread);
6812 }
6814 /*
6815 * A trivial implementation of ARMv8.1-LOR leaves all of these
6816 * registers fixed at 0, which indicates that there are zero
6817 * supported Limited Ordering regions.
6818 */
6819 static const ARMCPRegInfo lor_reginfo[] = {
6820 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6821 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6822 .access = PL1_RW, .accessfn = access_lor_other,
6823 .type = ARM_CP_CONST, .resetvalue = 0 },
6824 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6825 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6826 .access = PL1_RW, .accessfn = access_lor_other,
6827 .type = ARM_CP_CONST, .resetvalue = 0 },
6828 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6829 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6830 .access = PL1_RW, .accessfn = access_lor_other,
6831 .type = ARM_CP_CONST, .resetvalue = 0 },
6832 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6833 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6834 .access = PL1_RW, .accessfn = access_lor_other,
6835 .type = ARM_CP_CONST, .resetvalue = 0 },
6836 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6837 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6838 .access = PL1_R, .accessfn = access_lor_ns,
6839 .type = ARM_CP_CONST, .resetvalue = 0 },
6840 REGINFO_SENTINEL
6841 };
6843 #ifdef TARGET_AARCH64
6844 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6845 bool isread)
6846 {
6847 int el = arm_current_el(env);
6849 if (el < 2 &&
6850 arm_feature(env, ARM_FEATURE_EL2) &&
6851 !(arm_hcr_el2_eff(env) & HCR_APK)) {
6852 return CP_ACCESS_TRAP_EL2;
6853 }
6854 if (el < 3 &&
6855 arm_feature(env, ARM_FEATURE_EL3) &&
6856 !(env->cp15.scr_el3 & SCR_APK)) {
6857 return CP_ACCESS_TRAP_EL3;
6858 }
6859 return CP_ACCESS_OK;
6860 }
6862 static const ARMCPRegInfo pauth_reginfo[] = {
6863 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6864 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6865 .access = PL1_RW, .accessfn = access_pauth,
6866 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
6867 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6868 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6869 .access = PL1_RW, .accessfn = access_pauth,
6870 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
6871 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6872 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6873 .access = PL1_RW, .accessfn = access_pauth,
6874 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
6875 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6876 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6877 .access = PL1_RW, .accessfn = access_pauth,
6878 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
6879 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6880 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6881 .access = PL1_RW, .accessfn = access_pauth,
6882 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
6883 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6884 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6885 .access = PL1_RW, .accessfn = access_pauth,
6886 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
6887 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6888 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6889 .access = PL1_RW, .accessfn = access_pauth,
6890 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
6891 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6892 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6893 .access = PL1_RW, .accessfn = access_pauth,
6894 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
6895 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6896 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6897 .access = PL1_RW, .accessfn = access_pauth,
6898 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
6899 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6900 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6901 .access = PL1_RW, .accessfn = access_pauth,
6902 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
6903 REGINFO_SENTINEL
6904 };
6906 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
6907 {
6908 Error *err = NULL;
6909 uint64_t ret;
6911 /* Success sets NZCV = 0000. */
6912 env->NF = env->CF = env->VF = 0, env->ZF = 1;
6914 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
6915 /*
6916 * ??? Failed, for unknown reasons in the crypto subsystem.
6917 * The best we can do is log the reason and return the
6918 * timed-out indication to the guest. There is no reason
6919 * we know to expect this failure to be transitory, so the
6920 * guest may well hang retrying the operation.
6921 */
6922 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
6923 ri->name, error_get_pretty(err));
6924 error_free(err);
6926 env->ZF = 0; /* NZCV = 0100 */
6927 return 0;
6928 }
6929 return ret;
6930 }
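/*
 * Note on the flag encoding above: QEMU keeps the Z flag inverted in
 * env->ZF, so ZF = 1 reads back as Z clear (NZCV = 0000, success) and
 * ZF = 0 as Z set (NZCV = 0100), which is the failure indication that
 * FEAT_RNG guests test after reading RNDR.
 */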
6932 /* We do not support re-seeding, so the two registers operate the same. */
6933 static const ARMCPRegInfo rndr_reginfo[] = {
6934 { .name = "RNDR", .state = ARM_CP_STATE_AA64,
6935 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6936 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
6937 .access = PL0_R, .readfn = rndr_readfn },
6938 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
6939 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6940 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
6941 .access = PL0_R, .readfn = rndr_readfn },
6942 REGINFO_SENTINEL
6943 };
6945 #ifndef CONFIG_USER_ONLY
6946 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
6947 uint64_t value)
6948 {
6949 ARMCPU *cpu = env_archcpu(env);
6950 /* CTR_EL0 System register -> DminLine, bits [19:16] */
6951 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
6952 uint64_t vaddr_in = (uint64_t) value;
6953 uint64_t vaddr = vaddr_in & ~(dline_size - 1);
6954 void *haddr;
6955 int mem_idx = cpu_mmu_index(env, false);
6957 /* This won't be crossing page boundaries */
6958 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
6959 if (haddr) {
6961 ram_addr_t offset;
6962 MemoryRegion *mr;
6964 /* RCU lock is already being held */
6965 mr = memory_region_from_host(haddr, &offset);
6967 if (mr) {
6968 memory_region_writeback(mr, offset, dline_size);
6969 }
6970 }
6971 }
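/*
 * Example of the DminLine extraction above: CTR_EL0.DminLine holds
 * log2 of the line size in words, so a field value of 4 yields
 * 4 << 4 == 64 bytes and DC CVAP operates on the 64-byte line
 * containing the input address.
 */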
6973 static const ARMCPRegInfo dcpop_reg[] = {
6974 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
6975 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
6976 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6977 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
6978 REGINFO_SENTINEL
6979 };
6981 static const ARMCPRegInfo dcpodp_reg[] = {
6982 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
6983 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
6984 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6985 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
6986 REGINFO_SENTINEL
6987 };
6988 #endif /*CONFIG_USER_ONLY*/
6990 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
6991 bool isread)
6992 {
6993 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
6994 return CP_ACCESS_TRAP_EL2;
6995 }
6997 return CP_ACCESS_OK;
6998 }
7000 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
7001 bool isread)
7002 {
7003 int el = arm_current_el(env);
7005 if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
7006 uint64_t hcr = arm_hcr_el2_eff(env);
7007 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
7008 return CP_ACCESS_TRAP_EL2;
7009 }
7010 }
7011 if (el < 3 &&
7012 arm_feature(env, ARM_FEATURE_EL3) &&
7013 !(env->cp15.scr_el3 & SCR_ATA)) {
7014 return CP_ACCESS_TRAP_EL3;
7015 }
7016 return CP_ACCESS_OK;
7017 }
7019 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
7020 {
7021 return env->pstate & PSTATE_TCO;
7022 }
7024 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7025 {
7026 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
7027 }
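/*
 * Example: an MTE-aware guest toggles PSTATE.TCO (e.g. "msr tco, #1")
 * around accesses it knows may be tag-unchecked; tco_write above stores
 * the bit in env->pstate, and while it is set loads and stores do not
 * raise tag check faults.
 */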
7029 static const ARMCPRegInfo mte_reginfo[] = {
7030 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
7031 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
7032 .access = PL1_RW, .accessfn = access_mte,
7033 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
7034 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
7035 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
7036 .access = PL1_RW, .accessfn = access_mte,
7037 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
7038 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
7039 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
7040 .access = PL2_RW, .accessfn = access_mte,
7041 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
7042 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
7043 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
7044 .access = PL3_RW,
7045 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
7046 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
7047 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
7048 .access = PL1_RW, .accessfn = access_mte,
7049 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
7050 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
7051 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
7052 .access = PL1_RW, .accessfn = access_mte,
7053 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
7054 { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
7055 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
7056 .access = PL1_R, .accessfn = access_aa64_tid5,
7057 .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
7058 { .name = "TCO", .state = ARM_CP_STATE_AA64,
7059 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7060 .type = ARM_CP_NO_RAW,
7061 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
7062 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
7063 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
7064 .type = ARM_CP_NOP, .access = PL1_W,
7065 .accessfn = aa64_cacheop_poc_access },
7066 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
7067 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
7068 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7069 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
7070 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
7071 .type = ARM_CP_NOP, .access = PL1_W,
7072 .accessfn = aa64_cacheop_poc_access },
7073 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
7074 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
7075 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7076 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
7077 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
7078 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7079 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
7080 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
7081 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7082 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
7083 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
7084 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7085 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
7086 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
7087 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
7088 REGINFO_SENTINEL
7089 };
7091 static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
7092 { .name = "TCO", .state = ARM_CP_STATE_AA64,
7093 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
7094 .type = ARM_CP_CONST, .access = PL0_RW, },
7095 REGINFO_SENTINEL
7096 };
7098 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
7099 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
7100 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
7101 .type = ARM_CP_NOP, .access = PL0_W,
7102 .accessfn = aa64_cacheop_poc_access },
7103 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
7104 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
7105 .type = ARM_CP_NOP, .access = PL0_W,
7106 .accessfn = aa64_cacheop_poc_access },
7107 { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
7108 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
7109 .type = ARM_CP_NOP, .access = PL0_W,
7110 .accessfn = aa64_cacheop_poc_access },
7111 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
7112 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
7113 .type = ARM_CP_NOP, .access = PL0_W,
7114 .accessfn = aa64_cacheop_poc_access },
7115 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
7116 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
7117 .type = ARM_CP_NOP, .access = PL0_W,
7118 .accessfn = aa64_cacheop_poc_access },
7119 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
7120 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
7121 .type = ARM_CP_NOP, .access = PL0_W,
7122 .accessfn = aa64_cacheop_poc_access },
7123 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
7124 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
7125 .type = ARM_CP_NOP, .access = PL0_W,
7126 .accessfn = aa64_cacheop_poc_access },
7127 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
7128 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
7129 .type = ARM_CP_NOP, .access = PL0_W,
7130 .accessfn = aa64_cacheop_poc_access },
7131 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
7132 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
7133 .access = PL0_W, .type = ARM_CP_DC_GVA,
7134 #ifndef CONFIG_USER_ONLY
7135 /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_DC_GZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    REGINFO_SENTINEL
};
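
/*
 * Access gating for the CFP/DVP/CPP "restriction by context" (RCTX)
 * prediction-invalidation instructions: EL0 use requires
 * SCTLR_EL1.EnRCTX, and EL1 use traps to EL2 when HCR_EL2.NV is set.
 */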
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int el = arm_current_el(env);

    if (el == 0) {
        uint64_t sctlr = arm_sctlr(env, el);
        if (!(sctlr & SCTLR_EnRCTX)) {
            return CP_ACCESS_TRAP;
        }
    } else if (el == 1) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (hcr & HCR_NV) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}
7171 static const ARMCPRegInfo predinv_reginfo[] = {
7172 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7173 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
7174 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7175 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7176 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
7177 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7178 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7179 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
7180 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    /*
     * Note the AArch32 opcodes have a different OPC1.
     */
7184 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7185 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
7186 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7187 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7188 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
7189 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7190 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7191 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
    REGINFO_SENTINEL
};
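
/*
 * With FEAT_CCIDX, CCSIDR uses a 64-bit layout; CCSIDR2 exposes its
 * upper 32 bits as a separate register.
 */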
static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read the high 32 bits of the current CCSIDR */
    return extract64(ccsidr_read(env, ri), 32, 32);
}
7202 static const ARMCPRegInfo ccsidr2_reginfo[] = {
7203 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
      .access = PL1_R,
      .accessfn = access_aa64_tid2,
      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid3(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}
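
/*
 * These implement "trivial" Jazelle support: JIDR reads as zero and
 * JOSCR/JMCR are RAZ/WI, the minimal implementation the architecture
 * allows.
 */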
static const ARMCPRegInfo jazelle_regs[] = {
    { .name = "JIDR",
      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_R, .accessfn = access_jazelle,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JOSCR",
      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "JMCR",
      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo vhe_reginfo[] = {
    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
7260 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7261 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
7262 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
7263 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
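    /*
     * CNTHV_* is the extra EL2 virtual timer that comes with VHE; the
     * CNTP/CNTV *_EL02 entries further below are aliases that give EL2
     * access to the EL0/EL1 timer registers when HCR_EL2.E2H is set
     * (enforced via e2h_access).
     */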
7264 #ifndef CONFIG_USER_ONLY
    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
      .fieldoffset =
        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hv_timer_reset,
      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
7282 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7283 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
7284 .type = ARM_CP_IO | ARM_CP_ALIAS,
7285 .access = PL2_RW, .accessfn = e2h_access,
7286 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
7287 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
7288 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7289 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
7290 .type = ARM_CP_IO | ARM_CP_ALIAS,
7291 .access = PL2_RW, .accessfn = e2h_access,
7292 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
7293 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
7294 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7295 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
7296 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7297 .access = PL2_RW, .accessfn = e2h_access,
7298 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
7299 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7300 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
7301 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7302 .access = PL2_RW, .accessfn = e2h_access,
7303 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
7304 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7305 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
7306 .type = ARM_CP_IO | ARM_CP_ALIAS,
7307 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
7308 .access = PL2_RW, .accessfn = e2h_access,
7309 .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
7310 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7311 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
7312 .type = ARM_CP_IO | ARM_CP_ALIAS,
7313 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
7314 .access = PL2_RW, .accessfn = e2h_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
#endif
    REGINFO_SENTINEL
};
7320 #ifndef CONFIG_USER_ONLY
7321 static const ARMCPRegInfo ats1e1_reginfo[] = {
7322 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
7323 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7324 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7325 .writefn = ats_write64 },
7326 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
7327 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7328 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write64 },
    REGINFO_SENTINEL
};
7333 static const ARMCPRegInfo ats1cp_reginfo[] = {
7334 { .name = "ATS1CPRP",
7335 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7336 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7337 .writefn = ats_write },
7338 { .name = "ATS1CPWP",
7339 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7340 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
      .writefn = ats_write },
    REGINFO_SENTINEL
};
#endif

/*
 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
 * is non-zero, which is never for ARMv7, optionally in ARMv8
 * and mandatorily for ARMv8.2 and up.
 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
 * implementation is RAZ/WI we can ignore this detail, as we
 * do for ACTLR.
 */
7355 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
7356 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7357 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
7358 .access = PL1_RW, .accessfn = access_tacr,
7359 .type = ARM_CP_CONST, .resetvalue = 0 },
7360 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7361 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }
7376 define_arm_cp_regs(cpu, cp_reginfo);
7377 if (!arm_feature(env, ARM_FEATURE_V8)) {
7378 /* Must go early as it is full of wildcards that may be
         * overridden by later definitions.
         */
        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
    }
7384 if (arm_feature(env, ARM_FEATURE_V6)) {
7385 /* The ID registers all have impdef reset values */
7386 ARMCPRegInfo v6_idregs[] = {
7387 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
7388 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
7389 .access = PL1_R, .type = ARM_CP_CONST,
7390 .accessfn = access_aa32_tid3,
7391 .resetvalue = cpu->isar.id_pfr0 },
7392 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
             * the value of the GIC field until after we define these regs.
             */
7395 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
7396 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
7397 .access = PL1_R, .type = ARM_CP_NO_RAW,
7398 .accessfn = access_aa32_tid3,
7399 .readfn = id_pfr1_read,
7400 .writefn = arm_cp_write_ignore },
7401 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
7402 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
7403 .access = PL1_R, .type = ARM_CP_CONST,
7404 .accessfn = access_aa32_tid3,
7405 .resetvalue = cpu->isar.id_dfr0 },
7406 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
7407 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
7408 .access = PL1_R, .type = ARM_CP_CONST,
7409 .accessfn = access_aa32_tid3,
7410 .resetvalue = cpu->id_afr0 },
7411 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
7412 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
7413 .access = PL1_R, .type = ARM_CP_CONST,
7414 .accessfn = access_aa32_tid3,
7415 .resetvalue = cpu->isar.id_mmfr0 },
7416 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
7417 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
7418 .access = PL1_R, .type = ARM_CP_CONST,
7419 .accessfn = access_aa32_tid3,
7420 .resetvalue = cpu->isar.id_mmfr1 },
7421 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
7422 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
7423 .access = PL1_R, .type = ARM_CP_CONST,
7424 .accessfn = access_aa32_tid3,
7425 .resetvalue = cpu->isar.id_mmfr2 },
7426 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
7427 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
7428 .access = PL1_R, .type = ARM_CP_CONST,
7429 .accessfn = access_aa32_tid3,
7430 .resetvalue = cpu->isar.id_mmfr3 },
7431 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
7432 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
7433 .access = PL1_R, .type = ARM_CP_CONST,
7434 .accessfn = access_aa32_tid3,
7435 .resetvalue = cpu->isar.id_isar0 },
7436 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
7437 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
7438 .access = PL1_R, .type = ARM_CP_CONST,
7439 .accessfn = access_aa32_tid3,
7440 .resetvalue = cpu->isar.id_isar1 },
7441 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
7442 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
7443 .access = PL1_R, .type = ARM_CP_CONST,
7444 .accessfn = access_aa32_tid3,
7445 .resetvalue = cpu->isar.id_isar2 },
7446 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
7447 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
7448 .access = PL1_R, .type = ARM_CP_CONST,
7449 .accessfn = access_aa32_tid3,
7450 .resetvalue = cpu->isar.id_isar3 },
7451 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
7452 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
7453 .access = PL1_R, .type = ARM_CP_CONST,
7454 .accessfn = access_aa32_tid3,
7455 .resetvalue = cpu->isar.id_isar4 },
7456 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
7457 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
7458 .access = PL1_R, .type = ARM_CP_CONST,
7459 .accessfn = access_aa32_tid3,
7460 .resetvalue = cpu->isar.id_isar5 },
7461 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
7462 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
7463 .access = PL1_R, .type = ARM_CP_CONST,
7464 .accessfn = access_aa32_tid3,
7465 .resetvalue = cpu->isar.id_mmfr4 },
7466 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
7467 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
7468 .access = PL1_R, .type = ARM_CP_CONST,
7469 .accessfn = access_aa32_tid3,
              .resetvalue = cpu->isar.id_isar6 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7MP) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
    }
7489 ARMCPRegInfo clidr = {
7490 .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7491 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
7492 .access = PL1_R, .type = ARM_CP_CONST,
7493 .accessfn = access_aa64_tid2,
            .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
        define_debug_regs(cpu);
        define_pmu_regs(cpu);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
7503 if (arm_feature(env, ARM_FEATURE_V8)) {
7504 /* AArch64 ID registers, which all have impdef reset values.
7505 * Note that within the ID register ranges the unused slots
7506 * must all RAZ, not UNDEF; future architecture versions may
         * define new registers here.
         */
        ARMCPRegInfo v8_idregs[] = {
            /*
             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7512 * emulation because we don't know the right value for the
             * GIC field until after we define these regs.
             */
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R,
#ifdef CONFIG_USER_ONLY
              .type = ARM_CP_CONST,
              .resetvalue = cpu->isar.id_aa64pfr0
#else
              .type = ARM_CP_NO_RAW,
              .accessfn = access_aa64_tid3,
              .readfn = id_aa64pfr0_read,
              .writefn = arm_cp_write_ignore
#endif
            },
7528 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7529 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7530 .access = PL1_R, .type = ARM_CP_CONST,
7531 .accessfn = access_aa64_tid3,
7532 .resetvalue = cpu->isar.id_aa64pfr1},
7533 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7534 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7535 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7538 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7539 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7540 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7543 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
7544 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7545 .access = PL1_R, .type = ARM_CP_CONST,
7546 .accessfn = access_aa64_tid3,
              /* At present, only SVEver == 0 is defined anyway. */
              .resetvalue = 0 },
7549 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7550 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7551 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7554 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7555 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7556 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7559 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7560 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7561 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7564 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7565 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7566 .access = PL1_R, .type = ARM_CP_CONST,
7567 .accessfn = access_aa64_tid3,
7568 .resetvalue = cpu->isar.id_aa64dfr0 },
7569 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7570 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7571 .access = PL1_R, .type = ARM_CP_CONST,
7572 .accessfn = access_aa64_tid3,
7573 .resetvalue = cpu->isar.id_aa64dfr1 },
7574 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7575 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7576 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7579 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7580 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7581 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7584 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7585 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7586 .access = PL1_R, .type = ARM_CP_CONST,
7587 .accessfn = access_aa64_tid3,
7588 .resetvalue = cpu->id_aa64afr0 },
7589 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7590 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7591 .access = PL1_R, .type = ARM_CP_CONST,
7592 .accessfn = access_aa64_tid3,
7593 .resetvalue = cpu->id_aa64afr1 },
7594 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7595 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7596 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7599 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7600 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7601 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7604 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7605 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7606 .access = PL1_R, .type = ARM_CP_CONST,
7607 .accessfn = access_aa64_tid3,
7608 .resetvalue = cpu->isar.id_aa64isar0 },
7609 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7610 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7611 .access = PL1_R, .type = ARM_CP_CONST,
7612 .accessfn = access_aa64_tid3,
7613 .resetvalue = cpu->isar.id_aa64isar1 },
7614 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7615 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7616 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7619 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7620 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7621 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7624 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7625 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7626 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7629 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7630 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7631 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7634 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7635 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7636 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7639 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7640 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7641 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7644 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7645 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7646 .access = PL1_R, .type = ARM_CP_CONST,
7647 .accessfn = access_aa64_tid3,
7648 .resetvalue = cpu->isar.id_aa64mmfr0 },
7649 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7650 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7651 .access = PL1_R, .type = ARM_CP_CONST,
7652 .accessfn = access_aa64_tid3,
7653 .resetvalue = cpu->isar.id_aa64mmfr1 },
7654 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
7655 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7656 .access = PL1_R, .type = ARM_CP_CONST,
7657 .accessfn = access_aa64_tid3,
7658 .resetvalue = cpu->isar.id_aa64mmfr2 },
7659 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7660 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7661 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7664 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7665 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7666 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7669 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7670 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7671 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7674 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7675 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7676 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7679 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7680 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7681 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7684 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7685 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7686 .access = PL1_R, .type = ARM_CP_CONST,
7687 .accessfn = access_aa64_tid3,
7688 .resetvalue = cpu->isar.mvfr0 },
7689 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7690 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7691 .access = PL1_R, .type = ARM_CP_CONST,
7692 .accessfn = access_aa64_tid3,
7693 .resetvalue = cpu->isar.mvfr1 },
7694 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7695 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7696 .access = PL1_R, .type = ARM_CP_CONST,
7697 .accessfn = access_aa64_tid3,
7698 .resetvalue = cpu->isar.mvfr2 },
7699 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7700 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7701 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7704 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
7705 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7706 .access = PL1_R, .type = ARM_CP_CONST,
7707 .accessfn = access_aa64_tid3,
7708 .resetvalue = cpu->isar.id_pfr2 },
7709 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7710 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7711 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7714 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7715 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7716 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
7719 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7720 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7721 .access = PL1_R, .type = ARM_CP_CONST,
              .accessfn = access_aa64_tid3,
              .resetvalue = 0 },
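            /*
             * PMCEID0/PMCEID1 are bitmaps of the PMU common events the
             * implementation supports; the 32-bit AArch32 views read the
             * low word of the 64-bit value.
             */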
7724 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7725 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7726 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7727 .resetvalue = extract64(cpu->pmceid0, 0, 32) },
7728 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7729 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7730 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7731 .resetvalue = cpu->pmceid0 },
7732 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7733 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7734 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7735 .resetvalue = extract64(cpu->pmceid1, 0, 32) },
7736 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7737 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7738 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
              .resetvalue = cpu->pmceid1 },
            REGINFO_SENTINEL
        };
7742 #ifdef CONFIG_USER_ONLY
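        /*
         * For user-only emulation these masks choose what the guest may
         * observe: exported_bits keeps only the ID fields that linux-user
         * is prepared to expose, and fixed_bits forces fields that must
         * read as a particular value (see modify_arm_cp_regs() below).
         */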
7743 ARMCPRegUserSpaceInfo v8_user_idregs[] = {
7744 { .name = "ID_AA64PFR0_EL1",
7745 .exported_bits = 0x000f000f00ff0000,
7746 .fixed_bits = 0x0000000000000011 },
7747 { .name = "ID_AA64PFR1_EL1",
7748 .exported_bits = 0x00000000000000f0 },
            { .name = "ID_AA64PFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64ZFR0_EL1" },
            { .name = "ID_AA64MMFR0_EL1",
              .fixed_bits = 0x00000000ff000000 },
            { .name = "ID_AA64MMFR1_EL1" },
            { .name = "ID_AA64MMFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64DFR0_EL1",
              .fixed_bits = 0x0000000000000006 },
            { .name = "ID_AA64DFR1_EL1" },
            { .name = "ID_AA64DFR*_EL1_RESERVED",
              .is_glob = true },
            { .name = "ID_AA64AFR*",
              .is_glob = true },
            { .name = "ID_AA64ISAR0_EL1",
              .exported_bits = 0x00fffffff0fffff0 },
            { .name = "ID_AA64ISAR1_EL1",
              .exported_bits = 0x000000f0ffffffff },
            { .name = "ID_AA64ISAR*_EL1_RESERVED",
              .is_glob = true },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
#endif
7774 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7775 if (!arm_feature(env, ARM_FEATURE_EL3) &&
7776 !arm_feature(env, ARM_FEATURE_EL2)) {
7777 ARMCPRegInfo rvbar = {
7778 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
7779 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
            };
            define_one_arm_cp_reg(cpu, &rvbar);
        }
7784 define_arm_cp_regs(cpu, v8_idregs);
7785 define_arm_cp_regs(cpu, v8_cp_reginfo);
7787 if (arm_feature(env, ARM_FEATURE_EL2)) {
7788 uint64_t vmpidr_def = mpidr_read_val(env);
7789 ARMCPRegInfo vpidr_regs[] = {
7790 { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
7791 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7792 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7793 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
7794 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
7795 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
7796 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7797 .access = PL2_RW, .resetvalue = cpu->midr,
7798 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7799 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
7800 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7801 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7802 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
7803 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
7804 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
7805 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
                  .access = PL2_RW,
                  .resetvalue = vmpidr_def,
                  .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
                REGINFO_SENTINEL
            };
            define_arm_cp_regs(cpu, vpidr_regs);
7812 define_arm_cp_regs(cpu, el2_cp_reginfo);
7813 if (arm_feature(env, ARM_FEATURE_V8)) {
                define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
            }
7816 if (cpu_isar_feature(aa64_sel2, cpu)) {
                define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
            }
7819 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
7820 if (!arm_feature(env, ARM_FEATURE_EL3)) {
7821 ARMCPRegInfo rvbar = {
7822 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
7823 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
7824 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
                define_one_arm_cp_reg(cpu, &rvbar);
            }
        } else {
            /* If EL2 is missing but higher ELs are enabled, we need to
             * register the no_el2 reginfos.
             */
            if (arm_feature(env, ARM_FEATURE_EL3)) {
                /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
                 * of MIDR_EL1 and MPIDR_EL1.
                 */
7836 ARMCPRegInfo vpidr_regs[] = {
7837 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7838 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7839 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7840 .type = ARM_CP_CONST, .resetvalue = cpu->midr,
7841 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7842 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7843 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7844 .access = PL2_RW, .accessfn = access_el3_aa32ns,
7845 .type = ARM_CP_NO_RAW,
                      .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
                    REGINFO_SENTINEL
                };
                define_arm_cp_regs(cpu, vpidr_regs);
                define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
                if (arm_feature(env, ARM_FEATURE_V8)) {
                    define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
                }
            }
        }
7856 if (arm_feature(env, ARM_FEATURE_EL3)) {
7857 define_arm_cp_regs(cpu, el3_cp_reginfo);
7858 ARMCPRegInfo el3_regs[] = {
7859 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
7860 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
7861 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
              .access = PL3_RW,
              .raw_writefn = raw_write, .writefn = sctlr_write,
              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
              .resetvalue = cpu->reset_sctlr },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, el3_regs);
    }
7873 /* The behaviour of NSACR is sufficiently various that we don't
7874 * try to describe it in a single reginfo:
7875 * if EL3 is 64 bit, then trap to EL3 from S EL1,
7876 * reads as constant 0xc00 from NS EL1 and NS EL2
7877 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
7878 * if v7 without EL3, register doesn't exist
     * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
     */
7881 if (arm_feature(env, ARM_FEATURE_EL3)) {
7882 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7883 ARMCPRegInfo nsacr = {
7884 .name = "NSACR", .type = ARM_CP_CONST,
7885 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_RW, .accessfn = nsacr_access,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        } else {
            ARMCPRegInfo nsacr = {
                .name = "NSACR",
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL3_RW | PL1_R,
                .resetvalue = 0,
                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    } else {
        if (arm_feature(env, ARM_FEATURE_V8)) {
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */
        if (cpu_isar_feature(aa32_hpd, cpu)) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    if (cpu_isar_feature(aa32_jazelle, cpu)) {
        define_arm_cp_regs(cpu, jazelle_regs);
    }
7965 /* Slightly awkwardly, the OMAP and StrongARM cores need all of
7966 * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
7970 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
7971 /* Pre-v8 MIDR space.
7972 * Note that the MIDR isn't a simple constant register because
7973 * of the TI925 behaviour where writes to another register can
7974 * cause the MIDR value to change.
7976 * Unimplemented registers in the c15 0 0 0 space default to
7977 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
8005 ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
8006 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
8007 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
8008 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
8009 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
8010 .readfn = midr_read },
8011 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
8012 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8013 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
8014 .access = PL1_R, .resetvalue = cpu->midr },
8015 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
8016 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
8017 .access = PL1_R, .resetvalue = cpu->midr },
8018 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
8019 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
              .access = PL1_R,
              .accessfn = access_aa64_tid1,
              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
8025 ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
8029 .access = PL1_R, .accessfn = ctr_el0_access,
8030 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
8031 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
8032 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
8033 .access = PL0_R, .accessfn = ctr_el0_access,
8034 .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R,
              .accessfn = access_aa32_tid1,
              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
            .name = "TLBTR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
            .access = PL1_R,
            .accessfn = access_aa32_tid1,
            .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
            .name = "MPUIR",
            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
            .access = PL1_R, .type = ARM_CP_CONST,
            .resetvalue = cpu->pmsav7_dregion << 8
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
8063 #ifdef CONFIG_USER_ONLY
8064 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
8065 { .name = "MIDR_EL1",
8066 .exported_bits = 0x00000000ffffffff },
8067 { .name = "REVIDR_EL1" },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
#endif
8072 if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_pre_v8_midr_cp_reginfo;
                 r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
            id_mpuir_reginfo.access = PL1_RW;
            id_tlbtr_reginfo.access = PL1_RW;
        }
8091 if (arm_feature(env, ARM_FEATURE_V8)) {
            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
        }
    }
8104 if (arm_feature(env, ARM_FEATURE_MPIDR)) {
8105 ARMCPRegInfo mpidr_cp_reginfo[] = {
8106 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
8107 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
            REGINFO_SENTINEL
        };
8111 #ifdef CONFIG_USER_ONLY
8112 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
8113 { .name = "MPIDR_EL1",
8114 .fixed_bits = 0x0000000080000000 },
            REGUSERINFO_SENTINEL
        };
        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
#endif
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
8122 if (arm_feature(env, ARM_FEATURE_AUXCR)) {
8123 ARMCPRegInfo auxcr_reginfo[] = {
8124 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
8125 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
8126 .access = PL1_RW, .accessfn = access_tacr,
8127 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
8128 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
8129 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL2_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
              .access = PL3_RW, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, auxcr_reginfo);
        if (cpu_isar_feature(aa32_ac2, cpu)) {
            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
        }
    }
8144 if (arm_feature(env, ARM_FEATURE_CBAR)) {
        /*
         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8147 * There are two flavours:
8148 * (1) older 32-bit only cores have a simple 32-bit CBAR
8149 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8150 * 32-bit register visible to AArch32 at a different encoding
8151 * to the "flavour 1" register and with the bits rearranged to
8152 * be able to squash a 64-bit address into the 32-bit view.
8153 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
8154 * in future if we support AArch32-only configs of some of the
8155 * AArch64 cores we might need to add a specific feature flag
         * to indicate cores with "flavour 2" CBAR.
         */
8158 if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8159 /* 32 bit view is [31:18] 0...0 [43:32]. */
8160 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
8161 | extract64(cpu->reset_cbar, 32, 12);
            ARMCPRegInfo cbar_reginfo[] = {
                { .name = "CBAR",
                  .type = ARM_CP_CONST,
                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cbar32 },
8167 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
8168 .type = ARM_CP_CONST,
8169 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
                REGINFO_SENTINEL
            };
            /* We don't implement a r/w 64 bit CBAR currently */
            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
            define_arm_cp_regs(cpu, cbar_reginfo);
        } else {
            ARMCPRegInfo cbar = {
                .name = "CBAR",
                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
                .fieldoffset = offsetof(CPUARMState,
                                        cp15.c15_config_base_address)
            };
            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
                cbar.access = PL1_R;
                cbar.fieldoffset = 0;
                cbar.type = ARM_CP_CONST;
            }
            define_one_arm_cp_reg(cpu, &cbar);
        }
    }
8193 if (arm_feature(env, ARM_FEATURE_VBAR)) {
8194 ARMCPRegInfo vbar_cp_reginfo[] = {
8195 { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
8196 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8197 .access = PL1_RW, .writefn = vbar_write,
8198 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
                                     offsetof(CPUARMState, cp15.vbar_ns) },
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, vbar_cp_reginfo);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
8209 .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
8210 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
8211 .access = PL1_RW, .accessfn = access_tvm_trvm,
8212 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
8213 offsetof(CPUARMState, cp15.sctlr_ns) },
8214 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
8217 if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8218 /* Normally we would always end the TB on an SCTLR write, but Linux
8219 * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
    if (cpu_isar_feature(aa64_lor, cpu)) {
        define_arm_cp_regs(cpu, lor_reginfo);
    }
    if (cpu_isar_feature(aa64_pan, cpu)) {
        define_one_arm_cp_reg(cpu, &pan_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1e1_reginfo);
    }
    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
        define_arm_cp_regs(cpu, ats1cp_reginfo);
    }
#endif
    if (cpu_isar_feature(aa64_uao, cpu)) {
        define_one_arm_cp_reg(cpu, &uao_reginfo);
    }

    if (cpu_isar_feature(aa64_dit, cpu)) {
        define_one_arm_cp_reg(cpu, &dit_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_cp_regs(cpu, vhe_reginfo);
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
        } else {
            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
        }
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
        }
    }

#ifdef TARGET_AARCH64
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        define_arm_cp_regs(cpu, pauth_reginfo);
    }
    if (cpu_isar_feature(aa64_rndr, cpu)) {
        define_arm_cp_regs(cpu, rndr_reginfo);
    }
#ifndef CONFIG_USER_ONLY
    /* Data Cache clean instructions up to PoP */
    if (cpu_isar_feature(aa64_dcpop, cpu)) {
        define_one_arm_cp_reg(cpu, dcpop_reg);

        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
            define_one_arm_cp_reg(cpu, dcpodp_reg);
        }
    }
#endif /*CONFIG_USER_ONLY*/

    /*
     * If full MTE is enabled, add all of the system registers.
     * If only "instructions available at EL0" are enabled,
     * then define only a RAZ/WI version of PSTATE.TCO.
     */
    if (cpu_isar_feature(aa64_mte, cpu)) {
        define_arm_cp_regs(cpu, mte_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
    }
#endif

    if (cpu_isar_feature(any_predinv, cpu)) {
        define_arm_cp_regs(cpu, predinv_reginfo);
    }

    if (cpu_isar_feature(any_ccidx, cpu)) {
        define_arm_cp_regs(cpu, ccsidr2_reginfo);
    }

#ifndef CONFIG_USER_ONLY
    /*
     * Register redirections and aliases must be done last,
     * after the registers from the other extensions have been defined.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
        define_arm_vh_e2h_redirects_aliases(cpu);
    }
#endif
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /*
         * The lower part of each SVE register aliases to the FPU
         * registers so we don't need to include both.
         */
#ifdef TARGET_AARCH64
        if (isar_feature_aa64_sve(&cpu->isar)) {
            gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
                                     arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
                                     "sve-registers.xml", 0);
        } else
#endif
        {
            gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                     aarch64_fpu_gdb_set_reg,
                                     34, "aarch64-fpu.xml", 0);
        }
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
                             arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
                             "system-registers.xml", 0);
}
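
/*
 * Note that the system-register description handed to gdb above is
 * generated dynamically (arm_gen_dynamic_sysreg_xml), so the register
 * set gdb sees tracks whatever was defined for this particular CPU.
 */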
8354 /* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    qemu_printf("  %s\n", name);
    g_free(name);
}

void arm_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    qemu_printf("Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, NULL);
    g_slist_free(list);
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
    info->q_typename = g_strdup(typename);

    QAPI_LIST_PREPEND(*cpu_list, info);
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
8423 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
8424 void *opaque, int state, int secstate,
                                   int crm, int opc1, int opc2,
                                   const char *name)
{
8428 /* Private utility function for define_one_arm_cp_reg_with_opaque():
8429 * add a single reginfo struct to the hash table.
8431 uint32_t *key = g_new(uint32_t, 1);
8432 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
8433 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
8434 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
8436 r2->name = g_strdup(name);
8437 /* Reset the secure state to the specific incoming state. This is
8438 * necessary as the register may have been defined with both states.
8440 r2->secure = secstate;
8442 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
8443 /* Register is banked (using both entries in array).
8444 * Overwriting fieldoffset as the array is only used to define
8445 * banked registers but later only fieldoffset is used.
8447 r2->fieldoffset = r->bank_fieldoffsets[ns];
8450 if (state == ARM_CP_STATE_AA32) {
8451 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
8452 /* If the register is banked then we don't need to migrate or
8453 * reset the 32-bit instance in certain cases:
8455 * 1) If the register has both 32-bit and 64-bit instances then we
8456 * can count on the 64-bit instance taking care of the
8458 * 2) If ARMv8 is enabled then we can count on a 64-bit version
8459 * taking care of the secure bank. This requires that separate
8460 * 32 and 64-bit definitions are provided.
8462 if ((r->state == ARM_CP_STATE_BOTH && ns) ||
8463 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
8464 r2->type |= ARM_CP_ALIAS;
8466 } else if ((secstate != r->secure) && !ns) {
8467 /* The register is not banked so we only want to allow migration of
8468 * the non-secure instance.
8470 r2->type |= ARM_CP_ALIAS;
8473 if (r->state == ARM_CP_STATE_BOTH) {
        /* We assume it is a cp15 register if the .cp field is left unset.
         */
        if (r2->cp == 0) {
            r2->cp = 15;
        }
8480 #ifdef HOST_WORDS_BIGENDIAN
8481 if (r2->fieldoffset) {
            r2->fieldoffset += sizeof(uint32_t);
        }
#endif
    }
8487 if (state == ARM_CP_STATE_AA64) {
8488 /* To allow abbreviation of ARMCPRegInfo
8489 * definitions, we treat cp == 0 as equivalent to
8490 * the value for "standard guest-visible sysreg".
8491 * STATE_BOTH definitions are also always "standard
8492 * sysreg" in their AArch64 view (the .cp value may
8493 * be non-zero for the benefit of the AArch32 view).
8495 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
8496 r2->cp = CP_REG_ARM64_SYSREG_CP;
8498 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
8506 /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
8510 /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
8516 /* By convention, for wildcarded registers only the first
8517 * entry is used for migration; the others are marked as
8518 * ALIAS so we don't try to transfer the register
8519 * multiple times. Special registers (ie NOP/WFI) are
     * never migratable and not even raw-accessible.
     */
8522 if ((r->type & ARM_CP_SPECIAL)) {
8523 r2->type |= ARM_CP_NO_RAW;
8525 if (((r->crm == CP_ANY) && crm != 0) ||
8526 ((r->opc1 == CP_ANY) && opc1 != 0) ||
8527 ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
    }
8531 /* Check that raw accesses are either forbidden or handled. Note that
8532 * we can't assert this earlier because the setup of fieldoffset for
8533 * banked registers has to be done first.
8535 if (!(r2->type & ARM_CP_NO_RAW)) {
        assert(!raw_accessors_invalid(r2));
    }
    /* Overriding of an existing definition must be explicitly
     * requested.
     */
8542 if (!(r->type & ARM_CP_OVERRIDE)) {
8543 ARMCPRegInfo *oldreg;
8544 oldreg = g_hash_table_lookup(cpu->cp_regs, key);
8545 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
8546 fprintf(stderr, "Register redefined: cp=%d %d bit "
8547 "crn=%d crm=%d opc1=%d opc2=%d, "
8548 "was %s, now %s\n", r2->cp, 32 + 32 * is64,
8549 r2->crn, r2->crm, r2->opc1, r2->opc2,
8550 oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
8558 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
8561 /* Define implementations of coprocessor registers.
8562 * We store these in a hashtable because typically
8563 * there are less than 150 registers in a space which
8564 * is 16*16*16*8*8 = 262144 in size.
8565 * Wildcarding is supported for the crm, opc1 and opc2 fields.
8566 * If a register is defined twice then the second definition is
8567 * used, so this can be used to define some generic registers and
8568 * then override them with implementation specific variations.
8569 * At least one of the original and the second definition should
8570 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8571 * against accidental use.
8573 * The state field defines whether the register is to be
8574 * visible in the AArch32 or AArch64 execution state. If the
8575 * state is set to ARM_CP_STATE_BOTH then we synthesise a
8576 * reginfo structure for the AArch32 view, which sees the lower
8577 * 32 bits of the 64 bit register.
8579 * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8580 * be wildcarded. AArch64 registers are always considered to be 64
8581 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8582 * the register, if any.
8584 int crm, opc1, opc2, state;
8585 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8586 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8587 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8588 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8589 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8590 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
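    /*
     * A CP_ANY wildcard in crm/opc1/opc2 expands to that field's full
     * architectural range; an exact value collapses the corresponding
     * loop below to a single iteration.
     */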
8591 /* 64 bit registers have only CRm and Opc1 fields */
8592 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
8593 /* op0 only exists in the AArch64 encodings */
8594 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8595 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8596 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /*
     * This API is only for Arm's system coprocessors (14 and 15) or
8599 * (M-profile or v7A-and-earlier only) for implementation defined
8600 * coprocessors in the range 0..7. Our decode assumes this, since
8601 * 8..13 can be used for other insns including VFP and Neon. See
8602 * valid_cp() in translate.c. Assert here that we haven't tried
     * to use an invalid coprocessor number.
     */
    switch (r->state) {
8606 case ARM_CP_STATE_BOTH:
        /* 0 has a special meaning, but otherwise the same rules as AA32. */
        if (r->cp == 0) {
            break;
        }
        /* fall through */
8612 case ARM_CP_STATE_AA32:
8613 if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
8614 !arm_feature(&cpu->env, ARM_FEATURE_M)) {
8615 assert(r->cp >= 14 && r->cp <= 15);
            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
        }
        break;
8620 case ARM_CP_STATE_AA64:
        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
        break;
    default:
        g_assert_not_reached();
    }
8626 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8627 * encodes a minimum access level for the register. We roll this
8628 * runtime check into our general permission check code, so check
8629 * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0:
            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
            mask = PL0U_R | PL1_RW;
            break;
        case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
        case 5:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }
8669 /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
8672 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
8673 if (r->access & PL3_R) {
8674 assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->readfn);
        }
8678 if (r->access & PL3_W) {
8679 assert((r->fieldoffset ||
                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
                   r->writefn);
        }
    }
8684 /* Bad type field probably means missing sentinel at end of reg list */
8685 assert(cptype_valid(r->type));
8686 for (crm = crmmin; crm <= crmmax; crm++) {
8687 for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8688 for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
8689 for (state = ARM_CP_STATE_AA32;
8690 state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    if (state == ARM_CP_STATE_AA32) {
                        /* Under AArch32 CP registers can be common
                         * (same for secure and non-secure world) or banked.
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of cp15 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
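
/*
 * Usage sketch (illustrative; "FOO" and its encoding are hypothetical):
 * callers pass a sentinel-terminated array of reginfo structs, e.g.
 *
 *     static const ARMCPRegInfo foo_reginfo[] = {
 *         { .name = "FOO", .cp = 15, .crn = 0, .crm = 0,
 *           .opc1 = 0, .opc2 = 3, .access = PL1_RW,
 *           .type = ARM_CP_CONST, .resetvalue = 0 },
 *         REGINFO_SENTINEL
 *     };
 *     define_arm_cp_regs(cpu, foo_reginfo);
 *
 * The loop above stops at the ARM_CP_SENTINEL entry provided by the
 * REGINFO_SENTINEL macro.
 */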

/*
 * Modify ARMCPRegInfo for access from userspace.
 *
 * This is a data driven modification directed by
 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
 * user-space cannot alter any values and dynamic values pertaining to
 * execution state are hidden from user space view anyway.
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* continue */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
         */
        if (write_type == CPSRWriteByInstr &&
            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
            (arm_hcr_el2_eff(env) & HCR_TGE)) {
            return 1;
        }
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
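
/*
 * Layout note (illustrative): the word assembled above packs N/Z/C/V into
 * bits [31:28], Q into bit 27, IT[1:0] into bits [26:25], GE into bits
 * [19:16], IT[7:2] into bits [15:10], T into bit 5 and A/I/F from
 * env->daif; everything else comes straight from env->uncached_cpsr.
 * cpsr_write() below performs the inverse scatter.
 */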

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type)
{
    uint32_t changed_daif;

    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    /* In a V7 implementation that includes the security extensions but does
     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
     * bits respectively.
     *
     * In a V8 implementation, it is permitted for privileged software to
     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
     */
    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
        arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_is_secure(env)) {

        changed_daif = (env->daif ^ val) & mask;

        if (changed_daif & CPSR_A) {
            /* Check to see if we are allowed to change the masking of async
             * abort exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_AW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_A flag from "
                              "non-secure world with SCR.AW bit clear\n");
                mask &= ~CPSR_A;
            }
        }

        if (changed_daif & CPSR_F) {
            /* Check to see if we are allowed to change the masking of FIQ
             * exceptions from a non-secure state.
             */
            if (!(env->cp15.scr_el3 & SCR_FW)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to switch CPSR_F flag from "
                              "non-secure world with SCR.FW bit clear\n");
                mask &= ~CPSR_F;
            }

            /* Check whether non-maskable FIQ (NMFI) support is enabled.
             * If this bit is set software is not allowed to mask
             * FIQs, but is allowed to set CPSR_F to 0.
             */
            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
                (val & CPSR_F)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Ignoring attempt to enable CPSR_F flag "
                              "(non-maskable FIQ [NMFI] support enabled)\n");
                mask &= ~CPSR_F;
            }
        }
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if (write_type != CPSRWriteRaw &&
        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
            /* Note that we can only get here in USR mode if this is a
             * gdb stub write; for this case we follow the architectural
             * behaviour for guest writes in USR mode of ignoring an attempt
             * to switch mode. (Those are caught by translate.c for writes
             * triggered by guest instructions.)
             */
            mask &= ~CPSR_M;
        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
             * v7, and has defined behaviour in v8:
             *  + leave CPSR.M untouched
             *  + allow changes to the other CPSR fields
             *  + set PSTATE.IL
             * For user changes via the GDB stub, we don't set PSTATE.IL,
             * as this would be unnecessarily harsh for a user error.
             */
            mask &= ~CPSR_M;
            if (write_type != CPSRWriteByGDBStub &&
                arm_feature(env, ARM_FEATURE_V8)) {
                mask |= CPSR_IL;
                val |= CPSR_IL;
            }
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal AArch32 mode switch attempt from %s to %s\n",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val));
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
                          write_type == CPSRWriteExceptionReturn ?
                          "Exception return from AArch32" :
                          "AArch32 mode switch from",
                          aarch32_mode_name(env->uncached_cpsr),
                          aarch32_mode_name(val), env->regs[15]);
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}
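
/*
 * Worked example (illustrative): sxtb16 sign-extends bytes 0 and 2 into
 * halfwords, so sxtb16(0x00800081) == 0xff80ff81, whereas
 * uxtb16(0x00800081) == 0x00800081 since zero-extension leaves both
 * byte values unchanged.
 */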

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}

#ifdef CONFIG_USER_ONLY

static void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = env_archcpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

static void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->spsr = env->banked_spsr[i];

    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria.  Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 *    Dimensions:
 *    target_el_table[2][2][2][2][2][4]
 *                    |  |  |  |  |  +--- Current EL
 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                    |  |  |  +--------- HCR mask override
 *                    |  |  +------------ SCR exec state control
 *                    |  +--------------- SCR mask override
 *                    +------------------ 32-bit(0)/64-bit(1) EL3
 *
 *    The table values are as such:
 *    0-3 = EL0-EL3
 *     -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken".  The two cases where this is applicable are:
 *    1) An exception is taken from EL3 but the SCR does not have the exception
 *    routed to EL3.
 *    2) An exception is taken from EL2 but the HCR does not have the exception
 *    routed to EL2.
 * In these two cases, the below table contains a target of EL1.  This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
 *
 *            SCR     HCR
 *         64  EA     AMO                 From
 *        BIT IRQ     IMO      Non-secure         Secure
 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
 */
static const int8_t target_el_table[2][2][2][2][2][4] = {
    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
};
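
/*
 * Worked example (illustrative): a non-secure IRQ taken from EL0 with a
 * 64-bit EL3, SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 0 indexes
 * target_el_table[1][0][1][0][0][0] (the "1 0 1 0" row above) and yields
 * a target of EL1.
 */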

/*
 * Determine the target EL for physical exceptions
 */
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    CPUARMState *env = cs->env_ptr;
    bool rw;
    bool scr;
    bool hcr;
    int target_el;
    /* Is the highest EL AArch64? */
    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
    } else {
        /* Either EL2 is the highest EL (and so the EL2 register width
         * is given by is64); or there is no EL2 or EL3, in which case
         * the value of 'rw' does not affect the table lookup anyway.
         */
        rw = is64;
    }

    hcr_el2 = arm_hcr_el2_eff(env);
    switch (excp_idx) {
    case EXCP_IRQ:
        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
        hcr = hcr_el2 & HCR_IMO;
        break;
    case EXCP_FIQ:
        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
        hcr = hcr_el2 & HCR_FMO;
        break;
    default:
        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
        hcr = hcr_el2 & HCR_AMO;
        break;
    };

    /*
     * For these purposes, TGE and AMO/IMO/FMO both force the
     * interrupt to EL2.  Fold TGE into the bit extracted above.
     */
    hcr |= (hcr_el2 & HCR_TGE) != 0;

    /* Perform a table-lookup for the target EL given the current state */
    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];

    assert(target_el > 0);

    return target_el;
}

void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;
        static const char * const excnames[] = {
            [EXCP_UDEF] = "Undefined Instruction",
            [EXCP_SWI] = "SVC",
            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
            [EXCP_DATA_ABORT] = "Data Abort",
            [EXCP_IRQ] = "IRQ",
            [EXCP_FIQ] = "FIQ",
            [EXCP_BKPT] = "Breakpoint",
            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
            [EXCP_HVC] = "Hypervisor Call",
            [EXCP_HYP_TRAP] = "Hypervisor Trap",
            [EXCP_SMC] = "Secure Monitor Call",
            [EXCP_VIRQ] = "Virtual IRQ",
            [EXCP_VFIQ] = "Virtual FIQ",
            [EXCP_SEMIHOST] = "Semihosting call",
            [EXCP_NOCP] = "v7M NOCP UsageFault",
            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
            [EXCP_STKOF] = "v8M STKOF UsageFault",
            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
            [EXCP_LSERR] = "v8M LSERR UsageFault",
            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
        };

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

/*
 * Function used to synchronize QEMU's AArch64 register set with AArch32
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_32_to_64(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy R[0:7] to X[0:7] */
    for (i = 0; i < 8; i++) {
        env->xregs[i] = env->regs[i];
    }

    /*
     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
     * Otherwise, they come from the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->usr_regs[i - 8];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->xregs[i] = env->regs[i];
        }
    }

    /*
     * Registers x13-x23 are the various mode SP and FP registers. Registers
     * r13 and r14 are only copied if we are in that mode, otherwise we copy
     * from the mode banked register.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->xregs[13] = env->regs[13];
        env->xregs[14] = env->regs[14];
    } else {
        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
        /* HYP is an exception in that it is copied from r14 */
        if (mode == ARM_CPU_MODE_HYP) {
            env->xregs[14] = env->regs[14];
        } else {
            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->xregs[15] = env->regs[13];
    } else {
        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->xregs[16] = env->regs[14];
        env->xregs[17] = env->regs[13];
    } else {
        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->xregs[18] = env->regs[14];
        env->xregs[19] = env->regs[13];
    } else {
        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->xregs[20] = env->regs[14];
        env->xregs[21] = env->regs[13];
    } else {
        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->xregs[22] = env->regs[14];
        env->xregs[23] = env->regs[13];
    } else {
        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
    }

    /*
     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->xregs[i] = env->fiq_regs[i - 24];
        }
        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
    }

    env->pc = env->regs[15];
}

/*
 * Function used to synchronize QEMU's AArch32 register set with AArch64
 * register set.  This is necessary when switching between AArch32 and AArch64
 * execution state.
 */
void aarch64_sync_64_to_32(CPUARMState *env)
{
    int i;
    uint32_t mode = env->uncached_cpsr & CPSR_M;

    /* We can blanket copy X[0:7] to R[0:7] */
    for (i = 0; i < 8; i++) {
        env->regs[i] = env->xregs[i];
    }

    /*
     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
     * Otherwise, we copy x8-x12 into the banked user regs.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 8; i < 13; i++) {
            env->usr_regs[i - 8] = env->xregs[i];
        }
    } else {
        for (i = 8; i < 13; i++) {
            env->regs[i] = env->xregs[i];
        }
    }

    /*
     * Registers r13 & r14 depend on the current mode.
     * If we are in a given mode, we copy the corresponding x registers to r13
     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
     * regs.
     */
    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
        env->regs[13] = env->xregs[13];
        env->regs[14] = env->xregs[14];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];

        /*
         * HYP is an exception in that it does not have its own banked r14 but
         * shares the USR r14
         */
        if (mode == ARM_CPU_MODE_HYP) {
            env->regs[14] = env->xregs[14];
        } else {
            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
        }
    }

    if (mode == ARM_CPU_MODE_HYP) {
        env->regs[13] = env->xregs[15];
    } else {
        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
    }

    if (mode == ARM_CPU_MODE_IRQ) {
        env->regs[14] = env->xregs[16];
        env->regs[13] = env->xregs[17];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
    }

    if (mode == ARM_CPU_MODE_SVC) {
        env->regs[14] = env->xregs[18];
        env->regs[13] = env->xregs[19];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
    }

    if (mode == ARM_CPU_MODE_ABT) {
        env->regs[14] = env->xregs[20];
        env->regs[13] = env->xregs[21];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
    }

    if (mode == ARM_CPU_MODE_UND) {
        env->regs[14] = env->xregs[22];
        env->regs[13] = env->xregs[23];
    } else {
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
    }

    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
     * FIQ bank for r8-r14.
     */
    if (mode == ARM_CPU_MODE_FIQ) {
        for (i = 24; i < 31; i++) {
            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
        }
    } else {
        for (i = 24; i < 29; i++) {
            env->fiq_regs[i - 24] = env->xregs[i];
        }
        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
    }

    env->regs[15] = env->pc;
}

static void take_aarch32_exception(CPUARMState *env, int new_mode,
                                   uint32_t mask, uint32_t offset,
                                   uint32_t newpc)
{
    int new_el;

    /* Change the CPU state so as to actually take the exception. */
    switch_mode(env, new_mode);

    /*
     * For exceptions taken to AArch32 we must clear the SS bit in both
     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
     */
    env->pstate &= ~PSTATE_SS;
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;

    /* This must be after mode switching. */
    new_el = arm_current_el(env);

    /* Set new mode endianness */
    env->uncached_cpsr &= ~CPSR_E;
    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
        env->uncached_cpsr |= CPSR_E;
    }
    /* J and IL must always be cleared for exception entry */
    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
    env->daif |= mask;

    if (new_mode == ARM_CPU_MODE_HYP) {
        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
        env->elr_el[2] = env->regs[15];
    } else {
        /* CPSR.PAN is normally preserved unless...  */
        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
            switch (new_el) {
            case 3:
                if (!arm_is_secure_below_el3(env)) {
                    /* ... the target is EL3, from non-secure state.  */
                    env->uncached_cpsr &= ~CPSR_PAN;
                    break;
                }
                /* ... the target is EL3, from secure state ... */
                /* fall through */
            case 1:
                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
                    env->uncached_cpsr |= CPSR_PAN;
                }
                break;
            }
        }
        /*
         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
         * and we should just guard the thumb mode on V4
         */
        if (arm_feature(env, ARM_FEATURE_V4T)) {
            env->thumb =
                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
        }
        env->regs[14] = env->regs[15] + offset;
    }
    env->regs[15] = newpc;
    arm_rebuild_hflags(env);
}

static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
{
    /*
     * Handle exception entry to Hyp mode; this is sufficiently
     * different to entry to other AArch32 modes that we handle it
     * separately here.
     *
     * The vector table entry used is always the 0x14 Hyp mode entry point,
     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
     * The offset applied to the preferred return address is always zero
     * (see DDI0487C.a section G1.12.3).
     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
     */
    uint32_t addr, mask;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    switch (cs->exception_index) {
    case EXCP_UDEF:
        addr = 0x04;
        break;
    case EXCP_SWI:
        addr = 0x08;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        env->cp15.ifar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x0c;
        break;
    case EXCP_DATA_ABORT:
        env->cp15.dfar_s = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
                      (uint32_t)env->exception.vaddress);
        addr = 0x10;
        break;
    case EXCP_IRQ:
        addr = 0x18;
        break;
    case EXCP_FIQ:
        addr = 0x1c;
        break;
    case EXCP_HVC:
        addr = 0x08;
        break;
    case EXCP_HYP_TRAP:
        addr = 0x14;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            /*
             * QEMU syndrome values are v8-style. v7 has the IL bit
             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
             * If this is a v7 CPU, squash the IL bit in those cases.
             */
            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
                (cs->exception_index == EXCP_DATA_ABORT &&
                 !(env->exception.syndrome & ARM_EL_ISV)) ||
                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
                env->exception.syndrome &= ~ARM_EL_IL;
            }
        }
        env->cp15.esr_el[2] = env->exception.syndrome;
    }

    if (arm_current_el(env) != 2 && addr < 0x14) {
        addr = 0x14;
    }

    mask = 0;
    if (!(env->cp15.scr_el3 & SCR_EA)) {
        mask |= CPSR_A;
    }
    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
        mask |= CPSR_I;
    }
    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
        mask |= CPSR_F;
    }

    addr += env->cp15.hvbar;

    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
}

static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;
    uint32_t moe;

    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
    switch (syn_get_ec(env->exception.syndrome)) {
    case EC_BREAKPOINT:
    case EC_BREAKPOINT_SAME_EL:
        moe = 1;
        break;
    case EC_WATCHPOINT:
    case EC_WATCHPOINT_SAME_EL:
        moe = 10;
        break;
    case EC_AA32_BKPT:
        moe = 3;
        break;
    case EC_VECTORCATCH:
        moe = 5;
        break;
    default:
        moe = 0;
        break;
    }

    if (moe) {
        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
    }

    if (env->exception.target_el == 2) {
        arm_cpu_do_interrupt_aarch32_hyp(cs);
        return;
    }

    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->exception.fsr, (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->exception.fsr,
                      (uint32_t)env->exception.vaddress);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        if (env->cp15.scr_el3 & SCR_IRQ) {
            /* IRQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
            mask |= CPSR_F;
        }
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        if (env->cp15.scr_el3 & SCR_FIQ) {
            /* FIQ routed to monitor mode */
            new_mode = ARM_CPU_MODE_MON;
        }
        offset = 4;
        break;
    case EXCP_VIRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_VFIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    case EXCP_SMC:
        new_mode = ARM_CPU_MODE_MON;
        addr = 0x08;
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 0;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    if (new_mode == ARM_CPU_MODE_MON) {
        addr += env->cp15.mvbar;
    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        /* High vectors. When enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and is banked.
         * Note: only bits 31:5 are valid.
         */
        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
    }

    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
        env->cp15.scr_el3 &= ~SCR_NS;
    }

    take_aarch32_exception(env, new_mode, mask, offset, addr);
}

static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
{
    /*
     * Return the register number of the AArch64 view of the AArch32
     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
     * be that of the AArch32 mode the exception came from.
     */
    int mode = env->uncached_cpsr & CPSR_M;

    switch (aarch32_reg) {
    case 0 ... 7:
        return aarch32_reg;
    case 8 ... 12:
        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
    case 13:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
            return 13;
        case ARM_CPU_MODE_HYP:
            return 15;
        case ARM_CPU_MODE_IRQ:
            return 17;
        case ARM_CPU_MODE_SVC:
            return 19;
        case ARM_CPU_MODE_ABT:
            return 21;
        case ARM_CPU_MODE_UND:
            return 23;
        case ARM_CPU_MODE_FIQ:
            return 29;
        default:
            g_assert_not_reached();
        }
    case 14:
        switch (mode) {
        case ARM_CPU_MODE_USR:
        case ARM_CPU_MODE_SYS:
        case ARM_CPU_MODE_HYP:
            return 14;
        case ARM_CPU_MODE_IRQ:
            return 16;
        case ARM_CPU_MODE_SVC:
            return 18;
        case ARM_CPU_MODE_ABT:
            return 20;
        case ARM_CPU_MODE_UND:
            return 22;
        case ARM_CPU_MODE_FIQ:
            return 30;
        default:
            g_assert_not_reached();
        }
    case 15:
        return 31;
    default:
        g_assert_not_reached();
    }
}
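
/*
 * Illustrative mappings implied by the switch above: from FIQ mode,
 * AArch32 r8-r12 map to x24-x28, r13 to x29 and r14 to x30; from SVC
 * mode, r14 maps to x18 and r13 to x19; r15 always maps to 31.
 */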

static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
    uint32_t ret = cpsr_read(env);

    /* Move DIT to the correct location for SPSR_ELx */
    if (ret & CPSR_DIT) {
        ret &= ~CPSR_DIT;
        ret |= PSTATE_DIT;
    }
    /* Merge PSTATE.SS into SPSR_ELx */
    ret |= env->pstate & PSTATE_SS;

    return ret;
}

/* Handle exception entry to a target EL which is using AArch64 */
static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;
    target_ulong addr = env->cp15.vbar_el[new_el];
    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
    unsigned int old_mode;
    unsigned int cur_el = arm_current_el(env);
    int rt;

    /*
     * Note that new_el can never be 0.  If cur_el is 0, then
     * el0_a64 is is_a64(), else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));

    if (cur_el < new_el) {
        /* Entry vector offset depends on whether the implemented EL
         * immediately lower than the target level is using AArch32 or AArch64
         */
        bool is_aa64;
        uint64_t hcr;

        switch (new_el) {
        case 3:
            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
            break;
        case 2:
            hcr = arm_hcr_el2_eff(env);
            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
                is_aa64 = (hcr & HCR_RW) != 0;
                break;
            }
            /* fall through */
        case 1:
            is_aa64 = is_a64(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (is_aa64) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        env->cp15.far_el[new_el] = env->exception.vaddress;
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el[new_el]);
        /* fall through */
    case EXCP_BKPT:
    case EXCP_UDEF:
    case EXCP_SWI:
    case EXCP_HVC:
    case EXCP_HYP_TRAP:
    case EXCP_SMC:
        switch (syn_get_ec(env->exception.syndrome)) {
        case EC_ADVSIMDFPACCESSTRAP:
            /*
             * QEMU internal FP/SIMD syndromes from AArch32 include the
             * TA and coproc fields which are only exposed if the exception
             * is taken to AArch32 Hyp mode. Mask them out to get a valid
             * AArch64 format syndrome.
             */
            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
            break;
        case EC_CP14RTTRAP:
        case EC_CP15RTTRAP:
        case EC_CP14DTTRAP:
            /*
             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
             * the raw register field from the insn; when taking this to
             * AArch64 we must convert it to the AArch64 view of the register
             * number. Notice that we read a 4-bit AArch32 register number and
             * write back a 5-bit AArch64 one.
             */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            break;
        case EC_CP15RRTTRAP:
        case EC_CP14RRTTRAP:
            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
            rt = extract32(env->exception.syndrome, 5, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                5, 5, rt);
            rt = extract32(env->exception.syndrome, 10, 4);
            rt = aarch64_regnum(env, rt);
            env->exception.syndrome = deposit32(env->exception.syndrome,
                                                10, 5, rt);
            break;
        }
        env->cp15.esr_el[new_el] = env->exception.syndrome;
        break;
    case EXCP_IRQ:
    case EXCP_VIRQ:
        addr += 0x80;
        break;
    case EXCP_FIQ:
    case EXCP_VFIQ:
        addr += 0x100;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        old_mode = pstate_read(env);
        aarch64_save_sp(env, arm_current_el(env));
        env->elr_el[new_el] = env->pc;
    } else {
        old_mode = cpsr_read_for_spsr_elx(env);
        env->elr_el[new_el] = env->regs[15];

        aarch64_sync_32_to_64(env);

        env->condexec_bits = 0;
    }
    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;

    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
                  env->elr_el[new_el]);

    if (cpu_isar_feature(aa64_pan, cpu)) {
        /* The value of PSTATE.PAN is normally preserved, except when ... */
        new_mode |= old_mode & PSTATE_PAN;
        switch (new_el) {
        case 2:
            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
                != (HCR_E2H | HCR_TGE)) {
                break;
            }
            /* fall through */
        case 1:
            /* ... the target is EL1 ... */
            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
                new_mode |= PSTATE_PAN;
            }
            break;
        }
    }
    if (cpu_isar_feature(aa64_mte, cpu)) {
        new_mode |= PSTATE_TCO;
    }

    pstate_write(env, PSTATE_DAIF | new_mode);
    env->aarch64 = 1;
    aarch64_restore_sp(env, new_el);
    helper_rebuild_hflags_a64(env, new_el);

    env->pc = addr;

    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
                  new_el, env->pc, pstate_read(env));
}

/*
 * Do a semihosting call and set the appropriate return value. All the
 * permission and validity checks have been done at translate time.
 *
 * We only see semihosting exceptions in TCG, as they are not
 * trapped to the hypervisor in KVM.
 */
#ifdef CONFIG_TCG
static void handle_semihosting(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%" PRIx64 "\n",
                      env->xregs[0]);
        env->xregs[0] = do_common_semihosting(cs);
        env->pc += 4;
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "...handling as semihosting call 0x%x\n",
                      env->regs[0]);
        env->regs[0] = do_common_semihosting(cs);
        env->regs[15] += env->thumb ? 2 : 4;
    }
}
#endif

/* Handle a CPU exception for A and R profile CPUs.
 * Do any appropriate logging, handle PSCI calls, and then hand off
 * to the AArch64-entry or AArch32-entry function depending on the
 * target exception level's register width.
 *
 * Note: this is used for both TCG (as the do_interrupt tcg op),
 *       and KVM to re-inject guest debug exceptions, and to
 *       inject a Synchronous-External-Abort.
 */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    unsigned int new_el = env->exception.target_el;

    assert(!arm_feature(env, ARM_FEATURE_M));

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
                  new_el);
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
                      syn_get_ec(env->exception.syndrome),
                      env->exception.syndrome);
    }

    if (arm_is_psci_call(cpu, cs->exception_index)) {
        arm_handle_psci_call(cpu);
        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
        return;
    }

    /*
     * Semihosting semantics depend on the register width of the code
     * that caused the exception, not the target exception level, so
     * must be handled here.
     */
#ifdef CONFIG_TCG
    if (cs->exception_index == EXCP_SEMIHOST) {
        handle_semihosting(cs);
        return;
    }
#endif

    /* Hooks may change global state so BQL should be held, also the
     * BQL needs to be held for any modification of
     * cs->interrupt_request.
     */
    g_assert(qemu_mutex_iothread_locked());

    arm_call_pre_el_change_hook(cpu);

    assert(!excp_is_internal(cs->exception_index));
    if (arm_el_is_aa64(env, new_el)) {
        arm_cpu_do_interrupt_aarch64(cs);
    } else {
        arm_cpu_do_interrupt_aarch32(cs);
    }

    arm_call_el_change_hook(cpu);

    if (!kvm_enabled()) {
        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
    }
}
#endif /* !CONFIG_USER_ONLY */

uint64_t arm_sctlr(CPUARMState *env, int el)
{
    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
    if (el == 0) {
        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
        el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
             ? 2 : 1;
    }
    return env->cp15.sctlr_el[el];
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

#ifndef CONFIG_USER_ONLY

/* Return true if the specified stage of address translation is disabled */
static inline bool regime_translation_disabled(CPUARMState *env,
                                               ARMMMUIdx mmu_idx)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff(env);

    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
    }

    if (hcr_el2 & HCR_TGE) {
        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
            return true;
        }
    }

    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        return true;
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
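
/*
 * Illustrative consequence of the checks above: with HCR_EL2.DC set,
 * a stage 1 walk for the NS EL1&0 regime reports translation as
 * disabled even if SCTLR_EL1.M is 1, while stage 2 behaves as if
 * HCR_EL2.VM were 1.
 */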

static inline bool regime_translation_big_endian(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

/* Return the TTBR associated with this translation regime */
static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
                                   int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

#endif /* !CONFIG_USER_ONLY */

/* Convert a possible stage1+2 MMU index into the appropriate
 * stage 1 MMU index
 */
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
        return ARMMMUIdx_Stage1_SE0;
    case ARMMMUIdx_SE10_1:
        return ARMMMUIdx_Stage1_SE1;
    case ARMMMUIdx_SE10_1_PAN:
        return ARMMMUIdx_Stage1_SE1_PAN;
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env,
                                            ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/* Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use. */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);

    return regime_using_lpae_format(env, mmu_idx);
}

#ifndef CONFIG_USER_ONLY
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Translate section/page access permissions to page
 * R/W protection flags
 *
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                                int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}
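
/*
 * Summary of the AP[2:0] decode above (illustrative):
 *   0 -> no access on v7 (SCTLR.S/R dependent on earlier cores)
 *   1 -> PL1 read/write          2 -> PL1 read/write, PL0 read-only
 *   3 -> read/write at any PL    4 -> reserved (no access)
 *   5 -> PL1 read-only           6 -> read-only at any PL
 *   7 -> read-only (v6K and later; otherwise no access)
 */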

/* Translate section/page access permissions to page
 * R/W protection flags.
 *
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static inline int
simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

/* Translate S2 section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}

/* Translate section/page access permissions to protection flags
 *
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
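
/*
 * Illustrative consequence of the WXN handling above: with SCTLR.WXN set
 * on an LPAE-capable CPU, any mapping that is writable in the computed
 * prot_rw never gains PAGE_EXEC, regardless of the descriptor's XN bit.
 */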

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    TCR *tcr = regime_tcr(env, mmu_idx);

    if (address & tcr->mask) {
        if (tcr->raw_tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr->raw_tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure,
                               ARMMMUFaultInfo *fi)
{
    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
        target_ulong s2size;
        hwaddr s2pa;
        int s2prot;
        int ret;
        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                          : ARMMMUIdx_Stage2;
        ARMCacheAttrs cacheattrs = {};
        MemTxAttrs txattrs = {};

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
                                 &cacheattrs);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }
        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
            (cacheattrs.attrs & 0xf0) == 0) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !*is_secure;
            return ~0;
        }

        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (*is_secure) {
                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
            } else {
                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
            }
        } else {
            assert(!*is_secure);
        }

        addr = s2pa;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, int *prot,
                             target_ulong *page_size,
                             ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    *page_size = 0x1000;
                } else {
                    /* UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                *page_size = 0x400;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    *prot |= *prot ? PAGE_EXEC : 0;
    if (!(*prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                             target_ulong *page_size, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                       mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
                           mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (*prot && !xn) {
            *prot |= PAGE_EXEC;
        }
        if (!(*prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        attrs->secure = false;
    }
    *phys_ptr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /* Negative levels are never allowed.  */
    if (level < 0) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        CPUARMState *env = &cpu->env;
        unsigned int pamax = arm_pamax(cpu);

        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && pamax <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && pamax <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > pamax &&
            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}
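
/*
 * Worked example (illustrative): with 4KB pages, stride == 9 and
 * grainsize == 12, so a 40-bit IPA with a suggested starting level of 1
 * gives startsizecheck = 40 - (2 * 9 + 12) = 10, which lies inside
 * [1, stride + 4] and is accepted.
 */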

/* Translate from the 4-bit stage 2 representation of
 * memory attributes (without cache-allocation hints) to
 * the 8-bit representation of the stage 1 MAIR registers
 * (which includes allocation hints).
 *
 * ref: shared/translation/attrs/S2AttrDecode()
 *      .../S2ConvertAttrsHints()
 */
static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) { /* normal memory */
        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
            hiattr = loattr = 1; /* non-cacheable */
        } else {
            if (hiattr != 1) { /* Write-through or write-back */
                hihint = 3; /* RW allocate */
            }
            if (loattr != 1) { /* Write-through or write-back */
                lohint = 3; /* RW allocate */
            }
        }
    }

    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}
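
/*
 * Worked example (illustrative): s2attrs == 0xf (Normal memory,
 * write-back inner and outer) with HCR_EL2.CD clear converts to 0xff:
 * both attribute fields keep the value 3 and both hint fields become 3
 * (RW allocate).
 */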

#endif /* !CONFIG_USER_ONLY */

static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 37, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBI bit so we always have 2 bits.  */
        return extract32(tcr, 20, 1) * 3;
    }
}

static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 51, 2);
    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
        return 0; /* VTCR_EL2 */
    } else {
        /* Replicate the single TBID bit so we always have 2 bits.  */
        return extract32(tcr, 29, 1) * 3;
    }
}

static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
{
    if (regime_has_2_ranges(mmu_idx)) {
        return extract64(tcr, 57, 2);
    } else {
        /* Replicate the single TCMA bit so we always have 2 bits.  */
        return extract32(tcr, 30, 1) * 3;
    }
}

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    bool epd, hpd, using16k, using64k;
    int select, tsz, tbi, max_tsz;

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
        using64k = extract32(tcr, 14, 1);
        using16k = extract32(tcr, 15, 1);
        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
            hpd = extract32(tcr, 24, 1);
        }
        epd = false;
    } else {
        /*
         * Bit 55 is always between the two regions, and is canonical for
         * determining if address tagging is enabled.
         */
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
            epd = extract32(tcr, 7, 1);
            using64k = extract32(tcr, 14, 1);
            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            int tg = extract32(tcr, 30, 2);
            using16k = tg == 1;
            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
    }

    if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
        max_tsz = 48 - using64k;
    } else {
        max_tsz = 39;
    }

    tsz = MIN(tsz, max_tsz);
    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */

    /* Present TBI as a composite with TBID.  */
    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
    if (!data) {
        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
    }
    tbi = (tbi >> select) & 1;

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .tbi = tbi,
        .epd = epd,
        .hpd = hpd,
        .using16k = using16k,
        .using64k = using64k,
    };
}
11073 #ifndef CONFIG_USER_ONLY
11074 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
11077 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
11078 uint32_t el = regime_el(env, mmu_idx);
11082 assert(mmu_idx != ARMMMUIdx_Stage2_S);
11084 if (mmu_idx == ARMMMUIdx_Stage2) {
11086 bool sext = extract32(tcr, 4, 1);
11087 bool sign = extract32(tcr, 3, 1);
11090 * If the sign-extend bit is not the same as t0sz[3], the result
11091 * is unpredictable. Flag this as a guest error.
11093 if (sign != sext) {
11094 qemu_log_mask(LOG_GUEST_ERROR,
11095 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
11097 tsz = sextract32(tcr, 0, 4) + 8;
11101 } else if (el == 2) {
11103 tsz = extract32(tcr, 0, 3);
11105 hpd = extract64(tcr, 24, 1);
11108 int t0sz = extract32(tcr, 0, 3);
11109 int t1sz = extract32(tcr, 16, 3);
11112 select = va > (0xffffffffu >> t0sz);
11114 /* Note that we will detect errors later. */
11115 select = va >= ~(0xffffffffu >> t1sz);
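/*
 * Worked example: with t1sz == 1 the TTBR1 region starts at
 * ~(0xffffffffu >> 1) == 0x80000000, so va == 0x90000000 yields
 * select == 1 (TTBR1) while va == 0x10000000 yields select == 0.
 */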
11119 epd = extract32(tcr, 7, 1);
11120 hpd = extract64(tcr, 41, 1);
11123 epd = extract32(tcr, 23, 1);
11124 hpd = extract64(tcr, 42, 1);
11126 /* For aarch32, hpd0 is not enabled without t2e as well. */
11127 hpd &= extract32(tcr, 6, 1);
11130 return (ARMVAParameters) {
11139 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
11141 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
11142 * prot and page_size may not be filled in, and the populated fsr value provides
11143 * information on why the translation aborted, in the format of a long-format
11144 * DFSR/IFSR fault register, with the following caveats:
11145 * * the WnR bit is never set (the caller must do this).
11147 * @env: CPUARMState
11148 * @address: virtual address to get physical address for
11149 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
11150 * @mmu_idx: MMU index indicating required translation regime
11151 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
11152 * walk), must be true if this is stage 2 of a stage 1+2 walk for an
11153 * EL0 access. If @mmu_idx is anything else, @s1_is_el0 is ignored.
11154 * @phys_ptr: set to the physical address corresponding to the virtual address
11155 * @attrs: set to the memory transaction attributes to use
11156 * @prot: set to the permissions for the page containing phys_ptr
11157 * @page_size_ptr: set to the size of the page containing phys_ptr
11158 * @fi: set to fault info if the translation fails
11159 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
11161 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
11162 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11164 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
11165 target_ulong *page_size_ptr,
11166 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
11168 ARMCPU *cpu = env_archcpu(env);
11169 CPUState *cs = CPU(cpu);
11170 /* Read an LPAE long-descriptor translation table. */
11171 ARMFaultType fault_type = ARMFault_Translation;
11173 ARMVAParameters param;
11175 hwaddr descaddr, indexmask, indexmask_grainsize;
11176 uint32_t tableattrs;
11177 target_ulong page_size;
11180 int addrsize, inputsize;
11181 TCR *tcr = regime_tcr(env, mmu_idx);
11182 int ap, ns, xn, pxn;
11183 uint32_t el = regime_el(env, mmu_idx);
11184 uint64_t descaddrmask;
11185 bool aarch64 = arm_el_is_aa64(env, el);
11186 bool guarded = false;
11188 /* TODO: This code does not support shareability levels. */
11190 param = aa64_va_parameters(env, address, mmu_idx,
11191 access_type != MMU_INST_FETCH);
11193 addrsize = 64 - 8 * param.tbi;
11194 inputsize = 64 - param.tsz;
11196 param = aa32_va_parameters(env, address, mmu_idx);
11198 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
11199 inputsize = addrsize - param.tsz;
11203 * We determined the region when collecting the parameters, but we
11204 * have not yet validated that the address is valid for the region.
11205 * Extract the top bits and verify that they all match select.
11207 * For aa32, if inputsize == addrsize, then we have selected the
11208 * region by exclusion in aa32_va_parameters and there is no more
11209 * validation to do here.
11211 if (inputsize < addrsize) {
11212 target_ulong top_bits = sextract64(address, inputsize,
11213 addrsize - inputsize);
11214 if (-top_bits != param.select) {
11215 /* The gap between the two regions is a Translation fault */
11216 fault_type = ARMFault_Translation;
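/*
 * Worked example: with inputsize == 48 and addrsize == 64, a
 * canonical TTBR1 address has bits [63:48] all ones, so top_bits ==
 * -1 and -top_bits == 1 == param.select; any other pattern in those
 * bits fails the comparison above and takes this fault path.
 */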
11221 if (param.using64k) {
11223 } else if (param.using16k) {
11229 /* Note that QEMU ignores shareability and cacheability attributes,
11230 * so we don't need to do anything with the SH, ORGN, IRGN fields
11231 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
11232 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
11233 * implement any ASID-like capability so we can ignore it (instead
11234 * we will always flush the TLB any time the ASID is changed).
11236 ttbr = regime_ttbr(env, mmu_idx, param.select);
11238 /* Here we should have set up all the parameters for the translation:
11239 * inputsize, ttbr, epd, stride, tbi
11243 /* Translation table walk disabled => Translation fault on TLB miss
11244 * Note: This is always 0 on 64-bit EL2 and EL3.
11249 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
11250 /* The starting level depends on the virtual address size (which can
11251 * be up to 48 bits) and the translation granule size. It indicates
11252 * the number of strides (stride bits at a time) needed to
11253 * consume the bits of the input address. In the pseudocode this is:
11254 * level = 4 - RoundUp((inputsize - grainsize) / stride)
11255 * where their 'inputsize' is our 'inputsize', 'grainsize' is
11256 * our 'stride + 3' and 'stride' is our 'stride'.
11257 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
11258 * = 4 - (inputsize - stride - 3 + stride - 1) / stride
11259 * = 4 - (inputsize - 4) / stride;
11261 level = 4 - (inputsize - 4) / stride;
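/*
 * Worked example: with a 4KB granule (stride == 9) a 48-bit input
 * size gives level = 4 - (48 - 4) / 9 = 0, a four-level walk, while
 * a 39-bit input size gives level = 4 - 35 / 9 = 1, three levels.
 */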
11263 /* For stage 2 translations the starting level is specified by the
11264 * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
11266 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
11267 uint32_t startlevel;
11270 if (!aarch64 || stride == 9) {
11271 /* AArch32 or 4KB pages */
11272 startlevel = 2 - sl0;
11274 if (cpu_isar_feature(aa64_st, cpu)) {
11278 /* 16KB or 64KB pages */
11279 startlevel = 3 - sl0;
11282 /* Check that the starting level is valid. */
11283 ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
11284 inputsize, stride);
11286 fault_type = ARMFault_Translation;
11289 level = startlevel;
11292 indexmask_grainsize = (1ULL << (stride + 3)) - 1;
11293 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
11295 /* Now we can extract the actual base address from the TTBR */
11296 descaddr = extract64(ttbr, 0, 48);
11298 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
11299 * and also to mask out CnP (bit 0) which could validly be non-zero.
11301 descaddr &= ~indexmask;
11303 /* The address field in the descriptor goes up to bit 39 for ARMv7
11304 * but up to bit 47 for ARMv8; we use descaddrmask with bits up to 39
11305 * for AArch32 because the higher bits are not needed to construct
11306 * the next descriptor address (they should all be zeroes anyway).
11307 */
11308 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
11309 ~indexmask_grainsize;
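/*
 * Worked example: for AArch64 with a 4KB granule,
 * indexmask_grainsize == 0xfff, so descaddrmask selects bits [47:12],
 * exactly the output-address field of a 4KB table descriptor.
 */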
11311 /* Secure accesses start with the page table in secure memory and
11312 * can be downgraded to non-secure at any step. Non-secure accesses
11313 * remain non-secure. We implement this by just ORing in the NSTable/NS
11314 * bits at each step.
11316 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
11318 uint64_t descriptor;
11321 descaddr |= (address >> (stride * (4 - level))) & indexmask;
11323 nstable = extract32(tableattrs, 4, 1);
11324 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
11325 if (fi->type != ARMFault_None) {
11329 if (!(descriptor & 1) ||
11330 (!(descriptor & 2) && (level == 3))) {
11331 /* Invalid, or the Reserved level 3 encoding */
11334 descaddr = descriptor & descaddrmask;
11336 if ((descriptor & 2) && (level < 3)) {
11337 /* Table entry. The top five bits are attributes which may
11338 * propagate down through lower levels of the table (and
11339 * which are all arranged so that 0 means "no effect", so
11340 * we can gather them up by ORing in the bits at each level).
11342 tableattrs |= extract64(descriptor, 59, 5);
11344 indexmask = indexmask_grainsize;
11347 /* Block entry at level 1 or 2, or page entry at level 3.
11348 * These are basically the same thing, although the number
11349 * of bits we pull in from the vaddr varies.
11351 page_size = (1ULL << ((stride * (4 - level)) + 3));
11352 descaddr |= (address & (page_size - 1));
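/*
 * Worked example: with a 4KB granule (stride == 9) this yields
 * 1 << 12 == 4KB for a level 3 page, 1 << 21 == 2MB for a level 2
 * block, and 1 << 30 == 1GB for a level 1 block.
 */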
11353 /* Extract attributes from the descriptor */
11354 attrs = extract64(descriptor, 2, 10)
11355 | (extract64(descriptor, 52, 12) << 10);
11357 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11358 /* Stage 2 table descriptors do not include any attribute fields */
11361 /* Merge in attributes from table descriptors */
11362 attrs |= nstable << 3; /* NS */
11363 guarded = extract64(descriptor, 50, 1); /* GP */
11365 /* HPD disables all the table attributes except NSTable. */
11368 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
11369 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
11370 * means "force PL1 access only", which means forcing AP[1] to 0.
11372 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
11373 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
11376 /* Here descaddr is the final physical address, and attributes
11377 * are all in attrs.
11379 fault_type = ARMFault_AccessFlag;
11380 if ((attrs & (1 << 8)) == 0) {
11385 ap = extract32(attrs, 4, 2);
11387 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11388 ns = mmu_idx == ARMMMUIdx_Stage2;
11389 xn = extract32(attrs, 11, 2);
11390 *prot = get_S2prot(env, ap, xn, s1_is_el0);
11392 ns = extract32(attrs, 3, 1);
11393 xn = extract32(attrs, 12, 1);
11394 pxn = extract32(attrs, 11, 1);
11395 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
11398 fault_type = ARMFault_Permission;
11399 if (!(*prot & (1 << access_type))) {
11404 /* The NS bit will (as required by the architecture) have no effect if
11405 * the CPU doesn't support TZ or this is a non-secure translation
11406 * regime, because the attribute will already be non-secure.
11408 txattrs->secure = false;
11410 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
11411 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
11412 arm_tlb_bti_gp(txattrs) = true;
11415 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
11416 cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
11418 /* Index into MAIR registers for cache attributes */
11419 uint8_t attrindx = extract32(attrs, 0, 3);
11420 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
11421 assert(attrindx <= 7);
11422 cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
11424 cacheattrs->shareability = extract32(attrs, 6, 2);
11426 *phys_ptr = descaddr;
11427 *page_size_ptr = page_size;
11431 fi->type = fault_type;
11433 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
11434 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
11435 mmu_idx == ARMMMUIdx_Stage2_S);
11436 fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
11440 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
11442 int32_t address, int *prot)
11444 if (!arm_feature(env, ARM_FEATURE_M)) {
11445 *prot = PAGE_READ | PAGE_WRITE;
11447 case 0xF0000000 ... 0xFFFFFFFF:
11448 if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
11449 /* hivecs (high vectors): executing here is OK */
11450 *prot |= PAGE_EXEC;
11453 case 0x00000000 ... 0x7FFFFFFF:
11454 *prot |= PAGE_EXEC;
11458 /* Default system address map for M profile cores.
11459 * The architecture specifies which regions are execute-never;
11460 * at the MPU level no other checks are defined.
11463 case 0x00000000 ... 0x1fffffff: /* ROM */
11464 case 0x20000000 ... 0x3fffffff: /* SRAM */
11465 case 0x60000000 ... 0x7fffffff: /* RAM */
11466 case 0x80000000 ... 0x9fffffff: /* RAM */
11467 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11469 case 0x40000000 ... 0x5fffffff: /* Peripheral */
11470 case 0xa0000000 ... 0xbfffffff: /* Device */
11471 case 0xc0000000 ... 0xdfffffff: /* Device */
11472 case 0xe0000000 ... 0xffffffff: /* System */
11473 *prot = PAGE_READ | PAGE_WRITE;
11476 g_assert_not_reached();
11481 static bool pmsav7_use_background_region(ARMCPU *cpu,
11482 ARMMMUIdx mmu_idx, bool is_user)
11484 /* Return true if we should use the default memory map as a
11485 * "background" region if there are no hits against any MPU regions.
11487 CPUARMState *env = &cpu->env;
11493 if (arm_feature(env, ARM_FEATURE_M)) {
11494 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
11495 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
11497 return regime_sctlr(env, mmu_idx) & SCTLR_BR;
11501 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
11503 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
11504 return arm_feature(env, ARM_FEATURE_M) &&
11505 extract32(address, 20, 12) == 0xe00;
11508 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
11510 /* True if address is in the M profile system region
11511 * 0xe0000000 - 0xffffffff
11513 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
11516 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
11517 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11518 hwaddr *phys_ptr, int *prot,
11519 target_ulong *page_size,
11520 ARMMMUFaultInfo *fi)
11522 ARMCPU *cpu = env_archcpu(env);
11524 bool is_user = regime_is_user(env, mmu_idx);
11526 *phys_ptr = address;
11527 *page_size = TARGET_PAGE_SIZE;
11530 if (regime_translation_disabled(env, mmu_idx) ||
11531 m_is_ppb_region(env, address)) {
11532 /* MPU disabled or M profile PPB access: use default memory map.
11533 * The other case which uses the default memory map in the
11534 * v7M ARM ARM pseudocode is exception vector reads from the vector
11535 * table. In QEMU those accesses are done in arm_v7m_load_vector(),
11536 * which always does a direct read using address_space_ldl(), rather
11537 * than going via this function, so we don't need to check that here.
11539 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11540 } else { /* MPU enabled */
11541 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11542 /* region search */
11543 uint32_t base = env->pmsav7.drbar[n];
11544 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
11546 bool srdis = false;
11548 if (!(env->pmsav7.drsr[n] & 0x1)) {
11553 qemu_log_mask(LOG_GUEST_ERROR,
11554 "DRSR[%d]: Rsize field cannot be 0\n", n);
11558 rmask = (1ull << rsize) - 1;
11560 if (base & rmask) {
11561 qemu_log_mask(LOG_GUEST_ERROR,
11562 "DRBAR[%d]: 0x%" PRIx32 " misaligned "
11563 "to DRSR region size, mask = 0x%" PRIx32 "\n",
11568 if (address < base || address > base + rmask) {
11570 * Address not in this region. We must check whether the
11571 * region covers addresses in the same page as our address.
11572 * In that case we must not report a size that covers the
11573 * whole page for a subsequent hit against a different MPU
11574 * region or the background region, because it would result in
11575 * incorrect TLB hits for subsequent accesses to addresses that
11576 * are in this MPU region.
11578 if (ranges_overlap(base, rmask,
11579 address & TARGET_PAGE_MASK,
11580 TARGET_PAGE_SIZE)) {
11586 /* Region matched */
11588 if (rsize >= 8) { /* no subregions for regions < 256 bytes */
11590 uint32_t srdis_mask;
11592 rsize -= 3; /* sub region size (power of 2) */
11593 snd = ((address - base) >> rsize) & 0x7;
11594 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
11596 srdis_mask = srdis ? 0x3 : 0x0;
11597 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
11598 /* This checks, in groups of 2, 4 and then 8, whether
11599 * the subregion bits are consistent. rsize is incremented
11600 * back up to give the region size, considering consistent
11601 * adjacent subregions as one region. Stop testing if rsize
11602 * is already big enough for an entire QEMU page.
11604 int snd_rounded = snd & ~(i - 1);
11605 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
11606 snd_rounded + 8, i);
11607 if (srdis_mask ^ srdis_multi) {
11610 srdis_mask = (srdis_mask << i) | srdis_mask;
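/*
 * Worked example: a 1KB region has eight 128-byte subregions, so
 * after the adjustment above rsize == 7 and an offset of 0x180 lands
 * in subregion 0x180 >> 7 == 3; if subregions 2 and 3 have identical
 * SRD bits they merge into one 256-byte region, then groups of 4 and
 * 8 are tried in turn, growing rsize back toward the full size.
 */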
11617 if (rsize < TARGET_PAGE_BITS) {
11618 *page_size = 1 << rsize;
11623 if (n == -1) { /* no hits */
11624 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11625 /* background fault */
11626 fi->type = ARMFault_Background;
11629 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11630 } else { /* an MPU hit! */
11631 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
11632 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
11634 if (m_is_system_region(env, address)) {
11635 /* System space is always execute never */
11639 if (is_user) { /* User mode AP bit decoding */
11644 break; /* no access */
11646 *prot |= PAGE_WRITE;
11650 *prot |= PAGE_READ | PAGE_EXEC;
11653 /* for v7M, same as 6; for R profile a reserved value */
11654 if (arm_feature(env, ARM_FEATURE_M)) {
11655 *prot |= PAGE_READ | PAGE_EXEC;
11660 qemu_log_mask(LOG_GUEST_ERROR,
11661 "DRACR[%d]: Bad value for AP bits: 0x%"
11662 PRIx32 "\n", n, ap);
11664 } else { /* Priv. mode AP bits decoding */
11667 break; /* no access */
11671 *prot |= PAGE_WRITE;
11675 *prot |= PAGE_READ | PAGE_EXEC;
11678 /* for v7M, same as 6; for R profile a reserved value */
11679 if (arm_feature(env, ARM_FEATURE_M)) {
11680 *prot |= PAGE_READ | PAGE_EXEC;
11685 qemu_log_mask(LOG_GUEST_ERROR,
11686 "DRACR[%d]: Bad value for AP bits: 0x%"
11687 PRIx32 "\n", n, ap);
11691 /* execute never */
11693 *prot &= ~PAGE_EXEC;
11698 fi->type = ARMFault_Permission;
11700 return !(*prot & (1 << access_type));
11703 static bool v8m_is_sau_exempt(CPUARMState *env,
11704 uint32_t address, MMUAccessType access_type)
11706 /* The architecture specifies that certain address ranges are
11707 * exempt from v8M SAU/IDAU checks.
11710 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
11711 (address >= 0xe0000000 && address <= 0xe0002fff) ||
11712 (address >= 0xe000e000 && address <= 0xe000efff) ||
11713 (address >= 0xe002e000 && address <= 0xe002efff) ||
11714 (address >= 0xe0040000 && address <= 0xe0041fff) ||
11715 (address >= 0xe00ff000 && address <= 0xe00fffff);
11718 void v8m_security_lookup(CPUARMState *env, uint32_t address,
11719 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11720 V8M_SAttributes *sattrs)
11722 /* Look up the security attributes for this address. Compare the
11723 * pseudocode SecurityCheck() function.
11724 * We assume the caller has zero-initialized *sattrs.
11726 ARMCPU *cpu = env_archcpu(env);
11728 bool idau_exempt = false, idau_ns = true, idau_nsc = true;
11729 int idau_region = IREGION_NOTVALID;
11730 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11731 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11734 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
11735 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
11737 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
11741 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
11742 /* 0xf0000000..0xffffffff is always S for insn fetches */
11746 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
11747 sattrs->ns = !regime_is_secure(env, mmu_idx);
11751 if (idau_region != IREGION_NOTVALID) {
11752 sattrs->irvalid = true;
11753 sattrs->iregion = idau_region;
11756 switch (env->sau.ctrl & 3) {
11757 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
11759 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
11762 default: /* SAU.ENABLE == 1 */
11763 for (r = 0; r < cpu->sau_sregion; r++) {
11764 if (env->sau.rlar[r] & 1) {
11765 uint32_t base = env->sau.rbar[r] & ~0x1f;
11766 uint32_t limit = env->sau.rlar[r] | 0x1f;
11768 if (base <= address && limit >= address) {
11769 if (base > addr_page_base || limit < addr_page_limit) {
11770 sattrs->subpage = true;
11772 if (sattrs->srvalid) {
11773 /* If we hit in more than one region then we must report
11774 * as Secure, not NS-Callable, with no valid region
11775 * number.
11776 */
11777 sattrs->ns = false;
11778 sattrs->nsc = false;
11779 sattrs->sregion = 0;
11780 sattrs->srvalid = false;
11783 if (env->sau.rlar[r] & 2) {
11784 sattrs->nsc = true;
11788 sattrs->srvalid = true;
11789 sattrs->sregion = r;
11793 * Address not in this region. We must check whether the
11794 * region covers addresses in the same page as our address.
11795 * In that case we must not report a size that covers the
11796 * whole page for a subsequent hit against a different MPU
11797 * region or the background region, because it would result
11798 * in incorrect TLB hits for subsequent accesses to
11799 * addresses that are in this MPU region.
11801 if (limit >= base &&
11802 ranges_overlap(base, limit - base + 1,
11804 TARGET_PAGE_SIZE)) {
11805 sattrs->subpage = true;
11814 * The IDAU will override the SAU lookup results if it specifies
11815 * higher security than the SAU does.
11818 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
11819 sattrs->ns = false;
11820 sattrs->nsc = idau_nsc;
11825 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
11826 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11827 hwaddr *phys_ptr, MemTxAttrs *txattrs,
11828 int *prot, bool *is_subpage,
11829 ARMMMUFaultInfo *fi, uint32_t *mregion)
11831 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
11832 * that a full phys-to-virt translation does).
11833 * mregion is (if not NULL) set to the region number which matched,
11834 * or -1 if no region number is returned (MPU off, address did not
11835 * hit a region, address hit in multiple regions).
11836 * We set is_subpage to true if the region hit doesn't cover the
11837 * entire TARGET_PAGE the address is within.
11839 ARMCPU *cpu = env_archcpu(env);
11840 bool is_user = regime_is_user(env, mmu_idx);
11841 uint32_t secure = regime_is_secure(env, mmu_idx);
11843 int matchregion = -1;
11845 uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11846 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11848 *is_subpage = false;
11849 *phys_ptr = address;
11855 /* Unlike the ARM ARM pseudocode, we don't need to check whether this
11856 * was an exception vector read from the vector table (which is always
11857 * done using the default system address map), because those accesses
11858 * are done in arm_v7m_load_vector(), which always does a direct
11859 * read using address_space_ldl(), rather than going via this function.
11861 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
11863 } else if (m_is_ppb_region(env, address)) {
11866 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11870 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11871 /* region search */
11872 /* Note that the base address is bits [31:5] from the register
11873 * with bits [4:0] all zeroes, but the limit address is bits
11874 * [31:5] from the register with bits [4:0] all ones.
11876 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
11877 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
11879 if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
11880 /* Region disabled */
11884 if (address < base || address > limit) {
11886 * Address not in this region. We must check whether the
11887 * region covers addresses in the same page as our address.
11888 * In that case we must not report a size that covers the
11889 * whole page for a subsequent hit against a different MPU
11890 * region or the background region, because it would result in
11891 * incorrect TLB hits for subsequent accesses to addresses that
11892 * are in this MPU region.
11894 if (limit >= base &&
11895 ranges_overlap(base, limit - base + 1,
11897 TARGET_PAGE_SIZE)) {
11898 *is_subpage = true;
11903 if (base > addr_page_base || limit < addr_page_limit) {
11904 *is_subpage = true;
11907 if (matchregion != -1) {
11908 /* Multiple regions match -- always a failure (unlike
11909 * PMSAv7 where highest-numbered-region wins)
11911 fi->type = ARMFault_Permission;
11922 /* background fault */
11923 fi->type = ARMFault_Background;
11927 if (matchregion == -1) {
11928 /* hit using the background region */
11929 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11931 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
11932 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
11935 if (arm_feature(env, ARM_FEATURE_V8_1M)) {
11936 pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
11939 if (m_is_system_region(env, address)) {
11940 /* System space is always execute never */
11944 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
11945 if (*prot && !xn && !(pxn && !is_user)) {
11946 *prot |= PAGE_EXEC;
11948 /* We don't need to look the attribute up in the MAIR0/MAIR1
11949 * registers because that only tells us about cacheability.
11952 *mregion = matchregion;
11956 fi->type = ARMFault_Permission;
11958 return !(*prot & (1 << access_type));
11962 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
11963 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11964 hwaddr *phys_ptr, MemTxAttrs *txattrs,
11965 int *prot, target_ulong *page_size,
11966 ARMMMUFaultInfo *fi)
11968 uint32_t secure = regime_is_secure(env, mmu_idx);
11969 V8M_SAttributes sattrs = {};
11971 bool mpu_is_subpage;
11973 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
11974 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
11975 if (access_type == MMU_INST_FETCH) {
11976 /* Instruction fetches always use the MMU bank and the
11977 * transaction attribute determined by the fetch address,
11978 * regardless of CPU state. This is painful for QEMU
11979 * to handle, because it would mean we need to encode
11980 * into the mmu_idx not just the (user, negpri) information
11981 * for the current security state but also that for the
11982 * other security state, which would balloon the number
11983 * of mmu_idx values needed alarmingly.
11984 * Fortunately we can avoid this because it's not actually
11985 * possible to arbitrarily execute code from memory with
11986 * the wrong security attribute: it will always generate
11987 * an exception of some kind or another, apart from the
11988 * special case of an NS CPU executing an SG instruction
11989 * in S&NSC memory. So we always just fail the translation
11990 * here and sort things out in the exception handler
11991 * (including possibly emulating an SG instruction).
11993 if (sattrs.ns != !secure) {
11995 fi->type = ARMFault_QEMU_NSCExec;
11997 fi->type = ARMFault_QEMU_SFault;
11999 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
12000 *phys_ptr = address;
12005 /* For data accesses we always use the MMU bank indicated
12006 * by the current CPU state, but the security attributes
12007 * might downgrade a secure access to nonsecure.
12010 txattrs->secure = false;
12011 } else if (!secure) {
12012 /* NS access to S memory must fault.
12013 * Architecturally we should first check whether the
12014 * MPU information for this address indicates that we
12015 * are doing an unaligned access to Device memory, which
12016 * should generate a UsageFault instead. QEMU does not
12017 * currently check for that kind of unaligned access though.
12018 * If we added it we would need to do so as a special case
12019 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
12021 fi->type = ARMFault_QEMU_SFault;
12022 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
12023 *phys_ptr = address;
12030 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
12031 txattrs, prot, &mpu_is_subpage, fi, NULL);
12032 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
12036 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
12037 MMUAccessType access_type, ARMMMUIdx mmu_idx,
12038 hwaddr *phys_ptr, int *prot,
12039 ARMMMUFaultInfo *fi)
12044 bool is_user = regime_is_user(env, mmu_idx);
12046 if (regime_translation_disabled(env, mmu_idx)) {
12047 /* MPU disabled. */
12048 *phys_ptr = address;
12049 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12053 *phys_ptr = address;
12054 for (n = 7; n >= 0; n--) {
12055 base = env->cp15.c6_region[n];
12056 if ((base & 1) == 0) {
12059 mask = 1 << ((base >> 1) & 0x1f);
12060 /* Keep this shift separate from the above to avoid an
12061 (undefined) << 32. */
12062 mask = (mask << 1) - 1;
12063 if (((base ^ address) & ~mask) == 0) {
12068 fi->type = ARMFault_Background;
12072 if (access_type == MMU_INST_FETCH) {
12073 mask = env->cp15.pmsav5_insn_ap;
12075 mask = env->cp15.pmsav5_data_ap;
12077 mask = (mask >> (n * 4)) & 0xf;
12080 fi->type = ARMFault_Permission;
12085 fi->type = ARMFault_Permission;
12089 *prot = PAGE_READ | PAGE_WRITE;
12094 *prot |= PAGE_WRITE;
12098 *prot = PAGE_READ | PAGE_WRITE;
12102 fi->type = ARMFault_Permission;
12112 /* Bad permission. */
12113 fi->type = ARMFault_Permission;
12117 *prot |= PAGE_EXEC;
12121 /* Combine either inner or outer cacheability attributes for normal
12122 * memory, according to table D4-42 and pseudocode procedure
12123 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
12125 * NB: only stage 1 includes allocation hints (RW bits), leading to
12126 * the combined result taking its allocation hints from stage 1.
12127 */
12128 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
12130 if (s1 == 4 || s2 == 4) {
12131 /* non-cacheable has precedence */
12133 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
12134 /* stage 1 write-through takes precedence */
12136 } else if (extract32(s2, 2, 2) == 2) {
12137 /* stage 2 write-through takes precedence, but the allocation hint
12138 * is still taken from stage 1
12140 return (2 << 2) | extract32(s1, 0, 2);
12141 } else { /* write-back */
12146 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
12147 * and CombineS1S2Desc()
12149 * @s1: Attributes from stage 1 walk
12150 * @s2: Attributes from stage 2 walk
12152 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
12154 uint8_t s1lo, s2lo, s1hi, s2hi;
12156 bool tagged = false;
12158 if (s1.attrs == 0xf0) {
12163 s1lo = extract32(s1.attrs, 0, 4);
12164 s2lo = extract32(s2.attrs, 0, 4);
12165 s1hi = extract32(s1.attrs, 4, 4);
12166 s2hi = extract32(s2.attrs, 4, 4);
12168 /* Combine shareability attributes (table D4-43) */
12169 if (s1.shareability == 2 || s2.shareability == 2) {
12170 /* if either are outer-shareable, the result is outer-shareable */
12171 ret.shareability = 2;
12172 } else if (s1.shareability == 3 || s2.shareability == 3) {
12173 /* if either are inner-shareable, the result is inner-shareable */
12174 ret.shareability = 3;
12176 /* both non-shareable */
12177 ret.shareability = 0;
12180 /* Combine memory type and cacheability attributes */
12181 if (s1hi == 0 || s2hi == 0) {
12182 /* Device has precedence over normal */
12183 if (s1lo == 0 || s2lo == 0) {
12184 /* nGnRnE has precedence over anything */
12186 } else if (s1lo == 4 || s2lo == 4) {
12187 /* non-Reordering has precedence over Reordering */
12188 ret.attrs = 4; /* nGnRE */
12189 } else if (s1lo == 8 || s2lo == 8) {
12190 /* non-Gathering has precedence over Gathering */
12191 ret.attrs = 8; /* nGRE */
12193 ret.attrs = 0xc; /* GRE */
12196 /* Any location for which the resultant memory type is any
12197 * type of Device memory is always treated as Outer Shareable.
12199 ret.shareability = 2;
12200 } else { /* Normal memory */
12201 /* Outer/inner cacheability combine independently */
12202 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
12203 | combine_cacheattr_nibble(s1lo, s2lo);
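/*
 * Worked example: combining stage 1 attrs 0xff (Normal Write-Back
 * RW-allocate) with stage 2 attrs 0x44 (Normal Non-cacheable) gives
 * 0x44 in each nibble, because non-cacheable has precedence; the
 * check below then forces the 0x44 result to Outer Shareable.
 */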
12205 if (ret.attrs == 0x44) {
12206 /* Any location for which the resultant memory type is Normal
12207 * Inner Non-cacheable, Outer Non-cacheable is always treated
12208 * as Outer Shareable.
12210 ret.shareability = 2;
12214 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
12215 if (tagged && ret.attrs == 0xff) {
12223 /* get_phys_addr - get the physical address for this virtual address
12225 * Find the physical address corresponding to the given virtual address,
12226 * by doing a translation table walk on MMU based systems or using the
12227 * MPU state on MPU based systems.
12229 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
12230 * prot and page_size may not be filled in, and the populated fsr value provides
12231 * information on why the translation aborted, in the format of a
12232 * DFSR/IFSR fault register, with the following caveats:
12233 * * we honour the short vs long DFSR format differences.
12234 * * the WnR bit is never set (the caller must do this).
12235 * * for PMSAv5 based systems we don't bother to return a full FSR format
12236 * value.
12238 * @env: CPUARMState
12239 * @address: virtual address to get physical address for
12240 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
12241 * @mmu_idx: MMU index indicating required translation regime
12242 * @phys_ptr: set to the physical address corresponding to the virtual address
12243 * @attrs: set to the memory transaction attributes to use
12244 * @prot: set to the permissions for the page containing phys_ptr
12245 * @page_size: set to the size of the page containing phys_ptr
12246 * @fi: set to fault info if the translation fails
12247 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
12249 bool get_phys_addr(CPUARMState *env, target_ulong address,
12250 MMUAccessType access_type, ARMMMUIdx mmu_idx,
12251 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
12252 target_ulong *page_size,
12253 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
12255 ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
12257 if (mmu_idx != s1_mmu_idx) {
12258 /* Call ourselves recursively to do the stage 1 and then stage 2
12259 * translations if mmu_idx is a two-stage regime.
12261 if (arm_feature(env, ARM_FEATURE_EL2)) {
12265 ARMCacheAttrs cacheattrs2 = {};
12266 ARMMMUIdx s2_mmu_idx;
12269 ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
12270 attrs, prot, page_size, fi, cacheattrs);
12272 /* If S1 fails or S2 is disabled, return early. */
12273 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
12278 s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
12279 is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
12281 /* S1 is done. Now do S2 translation. */
12282 ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
12283 phys_ptr, attrs, &s2_prot,
12284 page_size, fi, &cacheattrs2);
12286 /* Combine the S1 and S2 perms. */
12289 /* If S2 fails, return early. */
12294 /* Combine the S1 and S2 cache attributes. */
12295 if (arm_hcr_el2_eff(env) & HCR_DC) {
12297 * HCR.DC forces the first stage attributes to
12298 * Normal Non-Shareable,
12299 * Inner Write-Back Read-Allocate Write-Allocate,
12300 * Outer Write-Back Read-Allocate Write-Allocate.
12301 * Do not overwrite Tagged within attrs.
12303 if (cacheattrs->attrs != 0xf0) {
12304 cacheattrs->attrs = 0xff;
12306 cacheattrs->shareability = 0;
12308 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
12310 /* Check if IPA translates to secure or non-secure PA space. */
12311 if (arm_is_secure_below_el3(env)) {
12312 if (attrs->secure) {
12314 !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
12317 !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
12318 || (env->cp15.vstcr_el2.raw_tcr & VSTCR_SA));
12324 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
12326 mmu_idx = stage_1_mmu_idx(mmu_idx);
12330 /* The page table entries may downgrade secure to non-secure, but
12331 * cannot upgrade a non-secure translation regime's attributes
12334 attrs->secure = regime_is_secure(env, mmu_idx);
12335 attrs->user = regime_is_user(env, mmu_idx);
12337 /* Fast Context Switch Extension. This doesn't exist at all in v8.
12338 * In v7 and earlier it affects all stage 1 translations.
12340 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
12341 && !arm_feature(env, ARM_FEATURE_V8)) {
12342 if (regime_el(env, mmu_idx) == 3) {
12343 address += env->cp15.fcseidr_s;
12345 address += env->cp15.fcseidr_ns;
12349 if (arm_feature(env, ARM_FEATURE_PMSA)) {
12351 *page_size = TARGET_PAGE_SIZE;
12353 if (arm_feature(env, ARM_FEATURE_V8)) {
12355 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
12356 phys_ptr, attrs, prot, page_size, fi);
12357 } else if (arm_feature(env, ARM_FEATURE_V7)) {
12359 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
12360 phys_ptr, prot, page_size, fi);
12363 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
12364 phys_ptr, prot, fi);
12366 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
12367 " mmu_idx %u -> %s (prot %c%c%c)\n",
12368 access_type == MMU_DATA_LOAD ? "reading" :
12369 (access_type == MMU_DATA_STORE ? "writing" : "executing"),
12370 (uint32_t)address, mmu_idx,
12371 ret ? "Miss" : "Hit",
12372 *prot & PAGE_READ ? 'r' : '-',
12373 *prot & PAGE_WRITE ? 'w' : '-',
12374 *prot & PAGE_EXEC ? 'x' : '-');
12379 /* Definitely a real MMU, not an MPU */
12381 if (regime_translation_disabled(env, mmu_idx)) {
12386 * MMU disabled. S1 addresses within aa64 translation regimes are
12387 * still checked for bounds -- see AArch64.TranslateAddressS1Off.
12389 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
12390 int r_el = regime_el(env, mmu_idx);
12391 if (arm_el_is_aa64(env, r_el)) {
12392 int pamax = arm_pamax(env_archcpu(env));
12393 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
12396 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
12397 if (access_type == MMU_INST_FETCH) {
12398 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
12400 tbi = (tbi >> extract64(address, 55, 1)) & 1;
12401 addrtop = (tbi ? 55 : 63);
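/*
 * Worked example: with a 40-bit PA range and TBI disabled,
 * addrtop == 63 and any of bits [63:40] being set raises an
 * AddressSize fault; with TBI enabled, addrtop == 55 and the top
 * byte is exempt from the check.
 */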
12403 if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
12404 fi->type = ARMFault_AddressSize;
12406 fi->stage2 = false;
12411 * When TBI is disabled, we've just validated that all of the
12412 * bits above PAMax are zero, so logically we only need to
12413 * clear the top byte for TBI. But it's clearer to follow
12414 * the pseudocode set of addrdesc.paddress.
12416 address = extract64(address, 0, 52);
12419 *phys_ptr = address;
12420 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12421 *page_size = TARGET_PAGE_SIZE;
12423 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
12424 hcr = arm_hcr_el2_eff(env);
12425 cacheattrs->shareability = 0;
12426 if (hcr & HCR_DC) {
12427 if (hcr & HCR_DCT) {
12428 memattr = 0xf0; /* Tagged, Normal, WB, RWA */
12430 memattr = 0xff; /* Normal, WB, RWA */
12432 } else if (access_type == MMU_INST_FETCH) {
12433 if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
12434 memattr = 0xee; /* Normal, WT, RA, NT */
12436 memattr = 0x44; /* Normal, NC, no allocation hints */
12438 cacheattrs->shareability = 2; /* outer shareable */
12440 memattr = 0x00; /* Device, nGnRnE */
12442 cacheattrs->attrs = memattr;
12446 if (regime_using_lpae_format(env, mmu_idx)) {
12447 return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
12448 phys_ptr, attrs, prot, page_size,
12450 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
12451 return get_phys_addr_v6(env, address, access_type, mmu_idx,
12452 phys_ptr, attrs, prot, page_size, fi);
12454 return get_phys_addr_v5(env, address, access_type, mmu_idx,
12455 phys_ptr, prot, page_size, fi);
12459 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
12462 ARMCPU *cpu = ARM_CPU(cs);
12463 CPUARMState *env = &cpu->env;
12465 target_ulong page_size;
12468 ARMMMUFaultInfo fi = {};
12469 ARMMMUIdx mmu_idx = arm_mmu_idx(env);
12470 ARMCacheAttrs cacheattrs = {};
12472 *attrs = (MemTxAttrs) {};
12474 ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
12475 attrs, &prot, &page_size, &fi, &cacheattrs);
12485 /* Note that signed overflow is undefined in C. The following routines are
12486 careful to use unsigned types where modulo arithmetic is required.
12487 Failure to do so _will_ break on newer gcc. */
12489 /* Signed saturating arithmetic. */
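/*
 * Note on the overflow predicate used below (a worked illustration):
 * for addition, 0x7000 + 0x2000 = 0x9000, so (res ^ a) has the sign
 * bit set while (a ^ b) does not -- equal-signed operands produced a
 * result of the opposite sign, so the value saturates (to 0x7fff for
 * positive operands). The subtraction variant instead requires the
 * operands to differ in sign.
 */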
12491 /* Perform 16-bit signed saturating addition. */
12492 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
12497 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
12506 /* Perform 8-bit signed saturating addition. */
12507 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
12512 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
12521 /* Perform 16-bit signed saturating subtraction. */
12522 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
12527 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
12536 /* Perform 8-bit signed saturating subtraction. */
12537 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
12542 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
12551 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
12552 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
12553 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
12554 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
12557 #include "op_addsub.h"
12559 /* Unsigned saturating arithmetic. */
12560 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
12569 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
12577 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
12586 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
12594 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
12595 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
12596 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
12597 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
12600 #include "op_addsub.h"
12602 /* Signed modulo arithmetic. */
12603 #define SARITH16(a, b, n, op) do { \
12605 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
12606 RESULT(sum, n, 16); \
12608 ge |= 3 << (n * 2); \
12611 #define SARITH8(a, b, n, op) do { \
12613 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
12614 RESULT(sum, n, 8); \
12620 #define ADD16(a, b, n) SARITH16(a, b, n, +)
12621 #define SUB16(a, b, n) SARITH16(a, b, n, -)
12622 #define ADD8(a, b, n) SARITH8(a, b, n, +)
12623 #define SUB8(a, b, n) SARITH8(a, b, n, -)
12627 #include "op_addsub.h"
12629 /* Unsigned modulo arithmetic. */
12630 #define ADD16(a, b, n) do { \
12632 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
12633 RESULT(sum, n, 16); \
12634 if ((sum >> 16) == 1) \
12635 ge |= 3 << (n * 2); \
12638 #define ADD8(a, b, n) do { \
12640 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12641 RESULT(sum, n, 8); \
12642 if ((sum >> 8) == 1) \
12646 #define SUB16(a, b, n) do { \
12648 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12649 RESULT(sum, n, 16); \
12650 if ((sum >> 16) == 0) \
12651 ge |= 3 << (n * 2); \
12654 #define SUB8(a, b, n) do { \
12656 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12657 RESULT(sum, n, 8); \
12658 if ((sum >> 8) == 0) \
12665 #include "op_addsub.h"
12667 /* Halved signed arithmetic. */
12668 #define ADD16(a, b, n) \
12669 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12670 #define SUB16(a, b, n) \
12671 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12672 #define ADD8(a, b, n) \
12673 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12674 #define SUB8(a, b, n) \
12675 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12678 #include "op_addsub.h"
12680 /* Halved unsigned arithmetic. */
12681 #define ADD16(a, b, n) \
12682 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12683 #define SUB16(a, b, n) \
12684 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12685 #define ADD8(a, b, n) \
12686 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12687 #define SUB8(a, b, n) \
12688 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12691 #include "op_addsub.h"
12693 static inline uint8_t do_usad(uint8_t a, uint8_t b)
12701 /* Unsigned sum of absolute byte differences. */
12702 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12705 sum = do_usad(a, b);
12706 sum += do_usad(a >> 8, b >> 8);
12707 sum += do_usad(a >> 16, b >> 16);
12708 sum += do_usad(a >> 24, b >> 24);
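/*
 * Worked example: usad8(0x01020304, 0x04030201) sums the per-byte
 * absolute differences |0x04-0x01| + |0x03-0x02| + |0x02-0x03| +
 * |0x01-0x04| = 3 + 1 + 1 + 3 = 8.
 */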
12712 /* For ARMv6 SEL instruction. */
12713 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12725 mask |= 0xff000000;
12726 return (a & mask) | (b & ~mask);
12730 * The upper bytes of val (above the number specified by 'bytes') must have
12731 * been zeroed out by the caller.
12733 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12737 stl_le_p(buf, val);
12739 /* zlib crc32 converts the accumulator and output to one's complement. */
12740 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12743 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12747 stl_le_p(buf, val);
12749 /* Linux crc32c converts the output to one's complement. */
12750 return crc32c(acc, buf, bytes) ^ 0xffffffff;
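/*
 * Usage sketch (hypothetical call sites, not from the source): the
 * caller zeroes the upper bytes of val, so the byte, halfword and
 * word forms of the instruction can all share these helpers:
 *
 *   crc = helper_crc32(crc, val & 0xff,   1);   // CRC32B
 *   crc = helper_crc32(crc, val & 0xffff, 2);   // CRC32H
 *   crc = helper_crc32(crc, val,          4);   // CRC32W
 *   crc = helper_crc32c(crc, val, 4);           // CRC32CW
 */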
12753 /* Return the exception level to which FP-disabled exceptions should
12754 * be taken, or 0 if FP is enabled.
12756 int fp_exception_el(CPUARMState *env, int cur_el)
12758 #ifndef CONFIG_USER_ONLY
12759 /* CPACR and the CPTR registers don't exist before v6, so FP is
12760 * always accessible
12762 if (!arm_feature(env, ARM_FEATURE_V6)) {
12766 if (arm_feature(env, ARM_FEATURE_M)) {
12767 /* CPACR can cause a NOCP UsageFault taken to current security state */
12768 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
12772 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
12773 if (!extract32(env->v7m.nsacr, 10, 1)) {
12774 /* FP insns cause a NOCP UsageFault taken to Secure */
12782 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12783 * 0, 2 : trap EL0 and EL1/PL1 accesses
12784 * 1 : trap only EL0 accesses
12785 * 3 : trap no accesses
12786 * This register is ignored if E2H+TGE are both set.
12788 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12789 int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
12794 if (cur_el == 0 || cur_el == 1) {
12795 /* Trap to PL1, which might be EL1 or EL3 */
12796 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
12801 if (cur_el == 3 && !is_a64(env)) {
12802 /* Secure PL1 running at EL3 */
12817 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
12818 * to control non-secure access to the FPU. It doesn't have any
12819 * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
12821 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
12822 cur_el <= 2 && !arm_is_secure_below_el3(env))) {
12823 if (!extract32(env->cp15.nsacr, 10, 1)) {
12824 /* FP insns act as UNDEF */
12825 return cur_el == 2 ? 2 : 1;
12829 /* For the CPTR registers we don't need to guard with an ARM_FEATURE
12830 * check because zero bits in the registers mean "don't trap".
12833 /* CPTR_EL2 : present in v7VE or v8 */
12834 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
12835 && arm_is_el2_enabled(env)) {
12836 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
12840 /* CPTR_EL3 : present in v8 */
12841 if (extract32(env->cp15.cptr_el[3], 10, 1)) {
12842 /* Trap all FP ops to EL3 */
12849 /* Return the exception level we're running at if this is our mmu_idx */
12850 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
12852 if (mmu_idx & ARM_MMU_IDX_M) {
12853 return mmu_idx & ARM_MMU_IDX_M_PRIV;
12857 case ARMMMUIdx_E10_0:
12858 case ARMMMUIdx_E20_0:
12859 case ARMMMUIdx_SE10_0:
12860 case ARMMMUIdx_SE20_0:
12862 case ARMMMUIdx_E10_1:
12863 case ARMMMUIdx_E10_1_PAN:
12864 case ARMMMUIdx_SE10_1:
12865 case ARMMMUIdx_SE10_1_PAN:
12868 case ARMMMUIdx_E20_2:
12869 case ARMMMUIdx_E20_2_PAN:
12870 case ARMMMUIdx_SE2:
12871 case ARMMMUIdx_SE20_2:
12872 case ARMMMUIdx_SE20_2_PAN:
12874 case ARMMMUIdx_SE3:
12877 g_assert_not_reached();
12882 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12884 g_assert_not_reached();
12888 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
12893 if (arm_feature(env, ARM_FEATURE_M)) {
12894 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12897 /* See ARM pseudo-function ELIsInHost. */
12900 hcr = arm_hcr_el2_eff(env);
12901 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
12902 idx = ARMMMUIdx_E20_0;
12904 idx = ARMMMUIdx_E10_0;
12908 if (env->pstate & PSTATE_PAN) {
12909 idx = ARMMMUIdx_E10_1_PAN;
12911 idx = ARMMMUIdx_E10_1;
12915 /* Note that TGE does not apply at EL2. */
12916 if (arm_hcr_el2_eff(env) & HCR_E2H) {
12917 if (env->pstate & PSTATE_PAN) {
12918 idx = ARMMMUIdx_E20_2_PAN;
12920 idx = ARMMMUIdx_E20_2;
12923 idx = ARMMMUIdx_E2;
12927 return ARMMMUIdx_SE3;
12929 g_assert_not_reached();
12932 if (arm_is_secure_below_el3(env)) {
12933 idx &= ~ARM_MMU_IDX_A_NS;
12939 ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12941 return arm_mmu_idx_el(env, arm_current_el(env));
12944 #ifndef CONFIG_USER_ONLY
12945 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
12947 return stage_1_mmu_idx(arm_mmu_idx(env));
12951 static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
12952 ARMMMUIdx mmu_idx, uint32_t flags)
12954 flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
12955 flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
12956 arm_to_core_mmu_idx(mmu_idx));
12958 if (arm_singlestep_active(env)) {
12959 flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
12964 static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
12965 ARMMMUIdx mmu_idx, uint32_t flags)
12967 bool sctlr_b = arm_sctlr_b(env);
12970 flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
12972 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
12973 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
12975 flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
12977 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
12980 static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
12983 uint32_t flags = 0;
12985 if (arm_v7m_is_handler_mode(env)) {
12986 flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
12990 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
12991 * is suppressing them because the requested execution priority
12992 * is negative.
12993 */
12994 if (arm_feature(env, ARM_FEATURE_V8) &&
12995 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
12996 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
12997 flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
13000 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
13003 static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
13007 flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
13008 arm_debug_target_el(env));
13012 static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
13015 uint32_t flags = rebuild_hflags_aprofile(env);
13017 if (arm_el_is_aa64(env, 1)) {
13018 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
13021 if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
13022 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
13023 flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
13026 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
13029 static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
13032 uint32_t flags = rebuild_hflags_aprofile(env);
13033 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
13034 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
13038 flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
13040 /* Get control bits for tagged addresses. */
13041 tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
13042 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
13044 flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
13045 flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
13047 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
13048 int sve_el = sve_exception_el(env, el);
13052 * If SVE is disabled, but FP is enabled,
13053 * then the effective len is 0.
13055 if (sve_el != 0 && fp_el == 0) {
13058 zcr_len = sve_zcr_len_for_el(env, el);
13060 flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
13061 flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
13064 sctlr = regime_sctlr(env, stage1);
13066 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
13067 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
13070 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
13072 * In order to save space in flags, we record only whether
13073 * pauth is "inactive", meaning all insns are implemented as
13074 * a nop, or "active" when some action must be performed.
13075 * The decision of which action to take is left to a helper.
13077 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
13078 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
13082 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
13083 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
13084 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
13085 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
13089 /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
13090 if (!(env->pstate & PSTATE_UAO)) {
13092 case ARMMMUIdx_E10_1:
13093 case ARMMMUIdx_E10_1_PAN:
13094 case ARMMMUIdx_SE10_1:
13095 case ARMMMUIdx_SE10_1_PAN:
13096 /* TODO: ARMv8.3-NV */
13097 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
13099 case ARMMMUIdx_E20_2:
13100 case ARMMMUIdx_E20_2_PAN:
13101 case ARMMMUIdx_SE20_2:
13102 case ARMMMUIdx_SE20_2_PAN:
13104 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
13105 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
13107 if (env->cp15.hcr_el2 & HCR_TGE) {
13108 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
13116 if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
13118 * Set MTE_ACTIVE if any access may be Checked, and leave clear
13119 * if all accesses must be Unchecked:
13120 * 1) If no TBI, then there are no tags in the address to check,
13121 * 2) If Tag Check Override, then all accesses are Unchecked,
13122 * 3) If Tag Check Fail == 0, then Checked accesses have no effect,
13123 * 4) If no Allocation Tag Access, then all accesses are Unchecked.
13125 if (allocation_tag_access_enabled(env, el, sctlr)) {
13126 flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
13128 && !(env->pstate & PSTATE_TCO)
13129 && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
13130 flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
13133 /* And again for unprivileged accesses, if required. */
13134 if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
13136 && !(env->pstate & PSTATE_TCO)
13137 && (sctlr & SCTLR_TCF)
13138 && allocation_tag_access_enabled(env, 0, sctlr)) {
13139 flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
13141 /* Cache TCMA as well as TBI. */
13142 flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
13143 aa64_va_parameter_tcma(tcr, mmu_idx));
13146 return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
13149 static uint32_t rebuild_hflags_internal(CPUARMState *env)
13151 int el = arm_current_el(env);
13152 int fp_el = fp_exception_el(env, el);
13153 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13156 return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
13157 } else if (arm_feature(env, ARM_FEATURE_M)) {
13158 return rebuild_hflags_m32(env, fp_el, mmu_idx);
13160 return rebuild_hflags_a32(env, fp_el, mmu_idx);
13164 void arm_rebuild_hflags(CPUARMState *env)
13166 env->hflags = rebuild_hflags_internal(env);
13170 * If we have triggered an EL state change we can't rely on the
13171 * translator having passed it to us; we need to recompute.
13173 void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
13175 int el = arm_current_el(env);
13176 int fp_el = fp_exception_el(env, el);
13177 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13178 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
13181 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
13183 int fp_el = fp_exception_el(env, el);
13184 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13186 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
13190 * If we have triggered an EL state change we can't rely on the
13191 * translator having passed it to us; we need to recompute.
13193 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
13195 int el = arm_current_el(env);
13196 int fp_el = fp_exception_el(env, el);
13197 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13198 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
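
/*
 * The helpers that take an explicit EL are for cases where the
 * translator already knows the destination EL at code generation
 * time; the *_newel variants recompute it, for cases where the EL
 * change is only known at execution time.
 */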

static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}
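
/*
 * Since the check runs from cpu_get_tb_cpu_state(), a debug-TCG
 * build re-derives the flags before every TB lookup, so a stale
 * env->hflags aborts at the first TB that would have consumed it.
 */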

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;

    *cs_base = 0;
    assert_hflags_rebuild_correctly(env);

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
            }
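
            /*
             * The FPCCR_S_WRONG flag set above tells the FP access
             * checks that FPCCR.S no longer matches the current
             * security state, so it must be brought back in line
             * before FP state is used.
             */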

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            /* Bit 30 is FPEXC.EN, the VFP enable bit. */
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
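    /*
     * For reference: in Active-not-pending exactly one instruction is
     * executed and PSTATE.SS is then cleared; in Active-pending the
     * step exception is taken before the next instruction executes.
     */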
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (env->pstate & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 words of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs. */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr. */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
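
/*
 * Worked example: with ARM_MAX_VQ == 16 and vq == 5, the memset above
 * clears bytes 80..255 of each zreg.  A predicate holds vq * 16
 * significant bits, so p[0] (vq 1..4) is untouched, p[1] is masked
 * with ~(-1ULL << 16) == 0xffff to keep the bits for the fifth
 * quadword, and p[2] and p[3] are cleared once pmask drops to 0.
 */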

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE. */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL. */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);
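
    /*
     * old_len and new_len are ZCR LEN values, i.e. vq - 1.  E.g. an
     * exception return from EL2 (vq == 4, len 3) to EL1 (vq == 2, len 1)
     * gives new_len < old_len, so the state beyond vq == 2 is zeroed
     * below via aarch64_sve_narrow_vq(env, 2).
     */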

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);