int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
void **phost, CPUTLBEntryFull **pfull,
- uintptr_t retaddr)
+ uintptr_t retaddr, bool check_mem_cbs)
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
vaddr page_addr = addr & TARGET_PAGE_MASK;
int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
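+ /* Plugin memory callbacks need the slow (MMIO) path so each access is visible to them. */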
+ bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
CPUTLBEntryFull *full;
if (!tlb_hit_page(tlb_addr, page_addr)) {
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
- if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+ if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY)) ||
+     (access_type != MMU_INST_FETCH && force_mmio)) {
*phost = NULL;
return TLB_MMIO;
}
uintptr_t retaddr)
{
int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- nonfault, phost, pfull, retaddr);
+ nonfault, phost, pfull, retaddr, true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}

return flags;
}
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull)
+{
+ void *discard_phost;
+ CPUTLBEntryFull *discard_tlb;
+
+ /* Privately handle callers that don't need the full results. */
+ phost = phost ? phost : &discard_phost;
+ pfull = pfull ? pfull : &discard_tlb;
+
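+ /* Cannot fault (nonfault=true) and never triggers plugin instrumentation (check_mem_cbs=false). */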
+ int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
+ true, phost, pfull, 0, false);
+
+ /* Handle clean RAM pages. */
+ if (unlikely(flags & TLB_NOTDIRTY)) {
+ notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
+ flags &= ~TLB_NOTDIRTY;
+ }
+
+ return flags;
+}
+
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- nonfault, phost, &full, retaddr);
+ nonfault, phost, &full, retaddr, true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- false, &host, &full, retaddr);
+ false, &host, &full, retaddr, true);
/* Per the interface, size == 0 merely faults the access. */
if (size == 0) {
int flags;
flags = probe_access_internal(env, addr, 0, access_type,
- mmu_idx, true, &host, &full, 0);
+ mmu_idx, true, &host, &full, 0, false);
/* No combination of flags is expected by the caller. */
return flags ? NULL : host;
void *p;
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
- cpu_mmu_index(env, true), false, &p, &full, 0);
+ cpu_mmu_index(env, true), false,
+ &p, &full, 0, false);
if (p == NULL) {
return -1;
}
if (guest_addr_valid_untagged(addr)) {
int page_flags = page_get_flags(addr);
if (page_flags & acc_flag) {
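+ /* Force the slow path for data reads/writes when plugin memory callbacks are enabled. */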
+ if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
+ && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+ return TLB_MMIO;
+ }
return 0; /* success */
}
maperr = !(page_flags & PAGE_VALID);
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
- *phost = flags ? NULL : g2h(env_cpu(env), addr);
+ *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
return flags;
}
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
flags = probe_access_internal(env, addr, size, access_type, false, ra);
- g_assert(flags == 0);
+ g_assert((flags & ~TLB_MMIO) == 0);
return size ? g2h(env_cpu(env), addr) : NULL;
}
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost,
CPUTLBEntryFull **pfull, uintptr_t retaddr);
+
+/**
+ * probe_access_full_mmu() - Like probe_access_full, except it cannot fault
+ * and does not trigger plugin instrumentation.
+ *
+ * @env: CPUArchState
+ * @addr: virtual address to probe
+ * @size: size of the probe
+ * @access_type: read, write or execute permission
+ * @mmu_idx: softmmu index
+ * @phost: ptr to return value host address or NULL
+ * @pfull: ptr to return value CPUTLBEntryFull structure or NULL
+ *
+ * The CPUTLBEntryFull structure returned via @pfull is transient
+ * and must be consumed or copied immediately, before any further
+ * access or changes to TLB @mmu_idx.
+ *
+ * Returns: TLB flags as per probe_access_flags()
+ */
+int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
+ MMUAccessType access_type, int mmu_idx,
+ void **phost, CPUTLBEntryFull **pfull);
+
#endif
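
For reference, a minimal caller sketch (not part of the patch; the helper name probe_descriptor_host is hypothetical) showing how probe_access_full_mmu can resolve a host pointer without faulting and without triggering plugin callbacks:

    /* Sketch only: assumes the usual QEMU softmmu headers are in scope. */
    static void *probe_descriptor_host(CPUArchState *env, vaddr addr, int mmu_idx)
    {
        void *host;
        CPUTLBEntryFull *full;
        int flags;

        /* Non-faulting probe that bypasses plugin instrumentation. */
        flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
                                      mmu_idx, &host, &full);
        if (flags & TLB_INVALID_MASK) {
            return NULL;        /* page not mapped for this mmu_idx */
        }
        if (flags & TLB_MMIO) {
            return NULL;        /* not plain RAM; caller must take the I/O path */
        }
        return host;            /* direct host pointer into guest RAM */
    }
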
/* Hide the qatomic_read to make code a little easier on the eyes */
int flags;
env->tlb_fi = fi;
- flags = probe_access_full(env, addr, 0, MMU_DATA_LOAD,
- arm_to_core_mmu_idx(s2_mmu_idx),
- true, &ptw->out_host, &full, 0);
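+ /* Use the MMU-only probe: this internal page-table walk must not be instrumented by plugins. */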
+ flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
+ arm_to_core_mmu_idx(s2_mmu_idx),
+ &ptw->out_host, &full);
env->tlb_fi = NULL;
if (unlikely(flags & TLB_INVALID_MASK)) {
*/
if (unlikely(!ptw->out_rw)) {
int flags;
- void *discard;
env->tlb_fi = fi;
- flags = probe_access_flags(env, ptw->out_virt, 0, MMU_DATA_STORE,
- arm_to_core_mmu_idx(ptw->in_ptw_idx),
- true, &discard, 0);
+ flags = probe_access_full_mmu(env, ptw->out_virt, 0,
+ MMU_DATA_STORE,
+ arm_to_core_mmu_idx(ptw->in_ptw_idx),
+ NULL, NULL);
env->tlb_fi = NULL;
if (unlikely(flags & TLB_INVALID_MASK)) {