/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

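/*
 * Illustrative caller (a sketch, not code quoted from this tree):
 * deciding whether an exception should be logged as guest-visible.
 *
 *     if (!excp_is_internal(cs->exception_index)) {
 *         arm_log_exception(cs);
 *     }
 */
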
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

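/*
 * Sanity check (illustrative arithmetic only): 10^9 ns/s divided by
 * 16 ns/tick is 62500000 ticks/s, i.e. the 62.5MHz noted above.
 */
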
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

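/*
 * These fields are meant for the FIELD_EX32/FIELD_DP32 accessors from
 * "hw/registerfields.h", included above. A sketch (the local variable
 * excret is hypothetical):
 *
 *     bool ftype = FIELD_EX32(excret, V7M_EXCRET, FTYPE);
 *     excret = FIELD_DP32(excret, V7M_EXCRET, SPSEL, 1);
 */
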
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

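/*
 * Typical use from a helper (a sketch; the syndrome shown is only an
 * example value):
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 */
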
/**
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

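/*
 * Indexing sketch (mirrors how mode-switch code is expected to use
 * these helpers; the variable mode is hypothetical):
 *
 *     env->banked_r13[bank_number(mode)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(mode)] = env->regs[14];
 */
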
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

/**
 * aarch64_sve_zcr_get_valid_len:
 * @cpu: cpu context
 * @start_len: maximum len to consider
 *
 * Return the maximum supported sve vector length <= @start_len.
 * Note that both @start_len and the return value are in units
 * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
 */
uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);

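/*
 * For example (arithmetic only): a returned len of 3 corresponds to a
 * vector length of (3 + 1) * 128 = 512 bits, i.e. a 64-byte vector.
 */
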
enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

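/*
 * Worked example (illustrative): a level 1 translation fault with
 * fi->domain == 0 and fi->ea == 0 encodes as 0x5, the short-format
 * "section translation fault" status code.
 */
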
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

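/*
 * The core index round-trips through these converters; a sketch for an
 * A-profile regime on an A-profile CPU (variables hypothetical):
 *
 *     int core = arm_to_core_mmu_idx(mmu_idx);
 *     assert(core_to_arm_mmu_idx(env, core) == mmu_idx);
 */
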
/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges.  */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

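/*
 * Debug-logging sketch (a hypothetical call site, not code from this
 * tree):
 *
 *     qemu_log("entering %s mode\n", aarch32_mode_name(env->uncached_cpsr));
 */
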
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

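/*
 * Usage sketch (hypothetical caller): the input-address size for a
 * regime follows directly from tsz.
 *
 *     ARMVAParameters p = aa64_va_parameters(env, addr, mmu_idx, true);
 *     int va_bits = 64 - p.tsz;
 */
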
static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64-bits of tags.
 */
#define GMID_EL1_BS 6

/* We associate one allocation tag per 16 bytes, the minimum.  */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)

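/*
 * Illustrative arithmetic: a maximal 256-byte tag block covers
 * 256 / TAG_GRANULE = 16 granules, at 4 bits of tag each, giving the
 * 64 bits of tags mentioned for GMID_EL1_BS above.
 */
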
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

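/*
 * Round-trip sketch (illustrative values only):
 *
 *     uint64_t tagged = address_with_allocation_tag(ptr, 0xa);
 *     assert(allocation_tag_from_addr(tagged) == 0xa);
 */
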
/* Return true if tbi bits mean that the access is checked.  */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked.  */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

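/*
 * Worked example (illustrative): with PMCR.N == 4 event counters,
 * pmu_counter_mask() is 0x8000000f -- bit 31 for the cycle counter
 * plus one bit per implemented event counter.
 */
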
#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
#endif

#ifdef CONFIG_USER_ONLY
static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
#else
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif

void aa32_max_features(ARMCPU *cpu);

#endif /* TARGET_ARM_INTERNALS_H */