/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}
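
/*
 * Worked example (illustrative): choose_nonexcluded_tag(0, 2, 0x0005)
 * excludes tags 0 and 2.  The first offset steps 0 -> 1; the second
 * steps 1 -> 2 -> 3, skipping the excluded tag 2, and returns 3.
 */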

/**
 * allocation_tag_mem_probe:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @probe: true to merely probe, never taking an exception
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 *
 * If the page is inaccessible for @ptr_access, or has a watchpoint, there are
 * three options:
 * (1) probe = true, ra = 0 : pure probe -- we return NULL if the page is not
 *     accessible, and do not take watchpoint traps.  The calling code must
 *     handle those cases in the right priority compared to MTE traps.
 * (2) probe = false, ra = 0 : probe, no fault expected -- the caller guarantees
 *     that the page is going to be accessible.  We will take watchpoint traps.
 * (3) probe = false, ra != 0 : non-probe -- we will take both memory access
 *     traps and watchpoint traps.
 * (probe = true, ra != 0 is invalid and will assert.)
 */
static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                         uint64_t ptr, MMUAccessType ptr_access,
                                         int ptr_size, MMUAccessType tag_access,
                                         bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}
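
/*
 * Worked example (illustrative): with LOG2_TAG_GRANULE = 4, addresses
 * 0x1000..0x100f and 0x1010..0x101f share one tag byte.  The first
 * granule's tag sits in bits [3:0] and the second's in bits [7:4], so
 * a tag byte of 0x3a yields tag 0xa for 0x1000 and tag 0x3 for 0x1010.
 */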

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}
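
/*
 * Worked example (illustrative): from SEED = 0x0001 the LFSR above
 * produces top = bit5 ^ bit3 ^ bit2 ^ bit0 = 1 and advances to 0x8000;
 * the next three steps all produce top = 0.  RandomTag therefore uses
 * offset = 0b0001 and selects the first non-excluded tag after
 * RGSR_EL1.TAG.
 */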

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}
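
/*
 * Worked example (illustrative): ADDG X0, X1, #16, #2 with the tag of
 * X1 being 5 and GCR_EL1.Exclude = 0 advances the tag 5 -> 6 -> 7, so
 * X0 is (X1 + 16) retagged with 7.  Excluded tags are skipped and do
 * not count toward tag_offset.
 */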

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}
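
/*
 * Worked example (illustrative): -(ptr | TARGET_PAGE_MASK) is the number
 * of bytes remaining on ptr's page.  With 4KiB pages, ptr = 0x1ff0 gives
 * in_page = 16, so the two granules of an ST2G land on different pages
 * and must be probed separately.
 */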

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *     data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}
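
/*
 * Worked example (illustrative): with gm_blocksize = 4 (64-byte blocks),
 * LDGM at ptr = 0x1040 loads 4 tags (16 bits) and computes
 * shift = extract64(0x1040, 4, 4) * 4 = 16, placing those tags at bits
 * [31:16] of the result -- their granule positions within the naturally
 * aligned 256-byte region covered by the full 64-bit register.
 */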

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift.  */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_LOAD, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}
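
/*
 * Worked example (illustrative): (val & 0xf) * 0x11 replicates the tag
 * into both nibbles, e.g. tag 0xa becomes byte 0xaa, so a single memset
 * fills every granule pair.  With a 64-byte dcz block, tag_bytes = 2.
 */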

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}
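
/*
 * Worked example (illustrative): an LDP of 16 bytes at an address with
 * bit 4 set spans an odd granule followed by an even one, so the caller
 * passes odd != 0 and count = 2.  checkN tests the high nibble of
 * mem[0], then the low nibble of mem[1], and returns 2 if both match.
 */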

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: the return address for exception handling
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, ra);

        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}
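
/*
 * Worked example (illustrative): a 16-byte access at ptr = 0x1008 has
 * ptr_last = 0x1017, so tag_first = 0x1000, tag_last = 0x1010, and
 * tag_count = 2 -- an unaligned access can touch one more granule than
 * its size alone would suggest.
 */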

uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    /*
     * R_XCHFJ: Alignment check not caused by memory type is priority 1,
     * higher than any translation fault.  When MTE is disabled, tcg
     * performs the alignment check during the code generated for the
     * memory access.  With MTE enabled, we must check this here before
     * raising any translation fault in allocation_tag_mem.
     */
    unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
    if (unlikely(align)) {
        align = (1u << align) - 1;
        if (unlikely(ptr & align)) {
            int idx = FIELD_EX32(desc, MTEDESC, MIDX);
            bool w = FIELD_EX32(desc, MTEDESC, WRITE);
            MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
            arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
        }
    }

    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * worrying about extracting tags from the middle of a word.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}

uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc)
{
    int mmu_idx, tag_count;
    uint64_t ptr_tag, tag_first, tag_last;
    void *mem;
    bool w = FIELD_EX32(desc, MTEDESC, WRITE);
    uint32_t n;

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    /* True probe; this will never fault */
    mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
                                   w ? MMU_DATA_STORE : MMU_DATA_LOAD,
                                   size, MMU_DATA_LOAD, true, 0);
    if (!mem) {
        return size;
    }

    /*
     * TODO: checkN() is not designed for checks of the size we expect
     * for FEAT_MOPS operations, so we should implement this differently.
     * Maybe we should do something like
     *   if (region start and size are aligned nicely) {
     *       do direct loads of 64 tag bits at a time;
     *   } else {
     *       call checkN()
     *   }
     */
    /* Round the bounds to the tag granule, and compute the number of tags. */
    ptr_tag = allocation_tag_from_addr(ptr);
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
    n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
    if (likely(n == tag_count)) {
        return size;
    }

    /*
     * Failure; for the first granule, it's at @ptr.  Otherwise
     * it's at the first byte of the nth granule.  Calculate how
     * many bytes we can access without hitting that failure.
     */
    if (n == 0) {
        return 0;
    } else {
        return n * TAG_GRANULE - (ptr - tag_first);
    }
}
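
/*
 * Worked example (illustrative): for ptr = 0x1008 with the first failing
 * granule at n = 2 (i.e. 0x1020), the result is
 * 2 * TAG_GRANULE - (0x1008 - 0x1000) = 24 accessible bytes.
 */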