/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7
static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}
/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
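
/*
 * Sanity check on the stated rate, for the reader (not from the original
 * source): at 16 ns per tick the timer frequency is
 * 1e9 / GTIMER_SCALE = 62,500,000 ticks per second, i.e. 62.5MHz.
 */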
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */
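
/*
 * Illustrative example (not from the original source): FIELD() generates
 * R_V7M_EXCRET_<name>_{SHIFT,MASK,LENGTH} constants usable with the
 * FIELD_EX32()/FIELD_DP32() helpers, so the common payload 0xfffffffd
 * (return to Thread mode using the process stack) decodes as:
 *   FIELD_EX32(0xfffffffd, V7M_EXCRET, MODE)  == 1   (Thread mode)
 *   FIELD_EX32(0xfffffffd, V7M_EXCRET, SPSEL) == 1   (process stack)
 */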
/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);
/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}
/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
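
/*
 * Worked example (illustrative only): Hyp mode banks its own R13 and SPSR
 * but shares R14 with User/System, so:
 *   bank_number(ARM_CPU_MODE_HYP)     == BANK_HYP     (for banked_r13/spsr)
 *   r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS  (for banked_r14)
 */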
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */
typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];
static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}
static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
/*
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif
/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
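
/*
 * Worked example (illustrative, not from the original source): a level-2
 * translation fault in domain 3 encodes as fsc 0x7 with the domain in
 * bits [7:4]:
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2,
 *                          .domain = 3 };
 *   arm_fi_to_sfsc(&fi) == 0x37   (0x7 | (3 << 4))
 */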
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9; /* LPAE */
    return fsc;
}
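
/*
 * Worked example (illustrative, not from the original source): the same
 * level-2 translation fault in long-descriptor format is 0b000100 | 2,
 * with the LPAE bit set in bit 9:
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *   arm_fi_to_lfsc(&fi) == 0x206   (0x6 | (1 << 9))
 */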
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}
#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    }
    return mmu_idx | ARM_MMU_IDX_A;
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);
#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif
/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}
/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)
/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}
/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}
/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);
/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}
static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}
static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;
/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran4K:
        return 12;
    case Gran16K:
        return 14;
    case Gran64K:
        return 16;
    default:
        g_assert_not_reached();
    }
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;
/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU context
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *               (ignored if @mmu_idx is for a stage 1 regime; only affects
 *               tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;
/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */
/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5
/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12)  /* size - 1 */
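
/*
 * Illustrative sketch (hypothetical values, not from the original source):
 * a descriptor for a 16-byte checked write via mmu_idx 1 with both TBI
 * bits set could be composed with the FIELD_DP32() helper from
 * hw/registerfields.h:
 *   uint32_t desc = 0;
 *   desc = FIELD_DP32(desc, MTEDESC, MIDX, 1);
 *   desc = FIELD_DP32(desc, MTEDESC, TBI, 3);
 *   desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
 *   desc = FIELD_DP32(desc, MTEDESC, SIZEM1, 16 - 1);
 */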
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * going to be done.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);
/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);
static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
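
/*
 * Worked example (illustrative only): the allocation tag lives in bits
 * [59:56] of the pointer, so for ptr == 0x5a00000000001000:
 *   allocation_tag_from_addr(ptr)         == 0xa
 *   address_with_allocation_tag(ptr, 0x3) == 0x5300000000001000
 */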
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
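
/*
 * Worked example (illustrative only) of the folded test above: for a
 * low-half pointer (bit55 == 0) the match requires ptr_tag == 0x0, so
 * ptr<59:55> == 00000; for a high-half pointer (bit55 == 1) it requires
 * ptr_tag == 0xf, since (0xf + 1) & 0xf == 0, so ptr<59:55> == 11111.
 */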
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
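
/*
 * Worked example (illustrative only): a PMU with PMCR.N == 4 event
 * counters yields (1ULL << 31) | ((1ULL << 4) - 1) == 0x8000000f,
 * i.e. the cycle counter bit plus one bit per event counter.
 */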
#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif
/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);
bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);
/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
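
/*
 * Worked example (illustrative only): with a 39-bit VA space
 * (param.tsz == 25) and TBI enabled (param.tbi == 1), the PAC occupies
 * bits [55:39]:
 *   MAKE_64BIT_MASK(39, 56 - 39) == 0x00ffff8000000000
 */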
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
1483 * Return true if it is possible to take a fine-grained-trap to EL2.
1485 static inline bool arm_fgt_active(CPUARMState *env, int el)
1488 * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
1489 * that can affect EL0, but it is harmless to do the test also for
1490 * traps on registers that are only accessible at EL1 because if the test
1491 * returns true then we can't be executing at EL1 anyway.
1492 * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
1493 * traps from AArch32 only happen for the EL0 is AArch32 case.
1495 return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
1496 el < 2 && arm_is_el2_enabled(env) &&
1497 arm_el_is_aa64(env, 1) &&
1498 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
1499 (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
void assert_hflags_rebuild_correctly(CPUARMState *env);
/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;
/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

#endif /* TARGET_ARM_INTERNALS_H */