/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

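/*
 * Illustrative sketch (not part of this header's API; the helper name is
 * made up): a TCG helper delivering an UNDEFINED exception would call
 * raise_exception() with a syndrome and target EL and never return:
 *
 *   void HELPER(example_undef)(CPUARMState *env)
 *   {
 *       raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                       exception_target_el(env));
 *   }
 */
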
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

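/*
 * Illustrative sketch (assumed mode-switch code, mirroring what the CPSR
 * write path in helper.c does): the outgoing mode's registers use
 * bank_number() for r13 and the SPSR but r14_bank_number() for r14, so
 * that Hyp mode's shared LR lands in the USR/SYS slot:
 *
 *   int old_bank = bank_number(old_mode);
 *   env->banked_r13[old_bank] = env->regs[13];
 *   env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
 *   env->banked_spsr[old_bank] = env->spsr;
 */
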
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

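/*
 * Illustrative sketch (assumed caller, e.g. the MSR SPSel handler):
 * toggling SPSel swaps which stack pointer xregs[31] refers to:
 *
 *   update_spsel(env, 1);   (xregs[31] now holds SP_ELx of the current EL)
 *   update_spsel(env, 0);   (xregs[31] switched back to SP_EL0)
 */
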
/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

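/*
 * Worked example (values follow directly from the encoding above):
 * a level 2 translation fault in domain 3 encodes as
 *   fsc = 0x7 | (3 << 4) = 0x37
 *
 *   ARMMMUFaultInfo fi = {
 *       .type = ARMFault_Translation,
 *       .level = 2,
 *       .domain = 3,
 *   };
 *   uint32_t fsr = arm_fi_to_sfsc(&fi);   (fsr == 0x37)
 */
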
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

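/*
 * Illustrative sketch: the core (TLB) index is just the ARMMMUIdx with
 * its profile bits masked off, so the two conversions round-trip:
 *
 *   int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *   ARMMMUIdx back = core_to_arm_mmu_idx(env, core);
 *   (back == ARMMMUIdx_E10_1 on an A-profile CPU)
 */
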
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

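/*
 * Illustrative sketch (assumed caller): walking code extracts fields
 * from the synthesized TCR value, e.g. the stage 1 T0SZ in bits [5:0]:
 *
 *   uint64_t tcr = regime_tcr(env, mmu_idx);
 *   int t0sz = extract64(tcr, 0, 6);
 */
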
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

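/*
 * Illustrative sketch (assumed stack-push code): the limit is checked
 * before the new stack pointer is written, and a v8M STKOF UsageFault
 * is raised on violation:
 *
 *   uint32_t newsp = env->regs[13] - framesize;
 *   if (newsp < v7m_sp_limit(env)) {
 *       ... raise a STKOF UsageFault ...
 *   }
 */
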
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

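/*
 * Illustrative sketch (assumed debug-log call site):
 *
 *   qemu_log_mask(CPU_LOG_INT, "taking IRQ in %s mode\n",
 *                 aarch32_mode_name(env->uncached_cpsr));
 */
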
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

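/*
 * Illustrative sketch (assumed exception-return path): an SPSR value is
 * filtered through the valid mask before being installed as PSTATE, so
 * that bits for absent features read back as zero:
 *
 *   uint32_t spsr = ...incoming SPSR value...;
 *   spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
 */
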
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran4K:
        return 12;
    case Gran16K:
        return 14;
    case Gran64K:
        return 16;
    default:
        g_assert_not_reached();
    }
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2].
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format.
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr_with_secure: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: security state for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type,
                               ARMMMUIdx mmu_idx, bool is_secure,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similarly, but use the security regime of @mmu_idx.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS 6

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

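/*
 * Illustrative sketch (made-up values): the MTE allocation tag lives in
 * bits [59:56] of the pointer, so re-tagging is a 4-bit deposit:
 *
 *   uint64_t p = address_with_allocation_tag(0x1000, 0x5);
 *   (p == 0x0500000000001000, and allocation_tag_from_addr(p) == 0x5)
 */
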
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

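/*
 * Illustrative sketch: with 4 event counters implemented the mask is
 * (1 << 31) | 0xf == 0x8000000f, i.e. the cycle counter bit plus one
 * bit per event counter; a PMCNTENSET write might then be handled as:
 *
 *   env->cp15.c9_pmcnten |= value & pmu_counter_mask(env);
 */
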
#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
#endif /* TARGET_AARCH64 */

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

#endif /* TARGET_ARM_INTERNALS_H */