* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*/
- if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
+ if (page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
uint64_t *p = __builtin_assume_aligned(pv, 8);
return *p;
}
* another process, because the fallback start_exclusive solution
* provides no protection across processes.
*/
- if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
+ if (page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
return *p;
}
#endif
}
}
-int page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(target_ulong start, target_ulong len, int flags)
{
target_ulong last;
int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
- int ret;
+ bool ret;
if (len == 0) {
- return 0; /* trivial length */
+ return true; /* trivial length */
}
last = start + len - 1;
if (last < start) {
- return -1; /* wrap around */
+ return false; /* wrap around */
}
locked = have_mmap_lock();
p = pageflags_find(start, last);
}
if (!p) {
- ret = -1; /* entire region invalid */
+ ret = false; /* entire region invalid */
break;
}
}
if (start < p->itree.start) {
- ret = -1; /* initial bytes invalid */
+ ret = false; /* initial bytes invalid */
break;
}
missing = flags & ~p->flags;
if (missing & ~PAGE_WRITE) {
- ret = -1; /* page doesn't match */
+ ret = false; /* page doesn't match */
break;
}
if (missing & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG)) {
- ret = -1; /* page not writable */
+ ret = false; /* page not writable */
break;
}
/* Asking about writable, but has been protected: undo. */
if (!page_unprotect(start, 0)) {
- ret = -1;
+ ret = false;
break;
}
/* TODO: page_unprotect should take a range, not a single page. */
if (last - start < TARGET_PAGE_SIZE) {
- ret = 0; /* ok */
+ ret = true; /* ok */
break;
}
start += TARGET_PAGE_SIZE;
}
if (last <= p->itree.last) {
- ret = 0; /* ok */
+ ret = true; /* ok */
break;
}
start = p->itree.last + 1;
static inline bool access_ok(int type, abi_ulong addr, abi_ulong size)
{
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
/*
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
-int page_check_range(target_ulong start, target_ulong len, int flags);
+
+/**
+ * page_check_range
+ * @start: first byte of range
+ * @len: length of range
+ * @flags: flags required for each page
+ *
+ * Return true if every page in [@start, @start+@len) has @flags set.
+ * Return false if any page is unmapped. Thus testing flags == 0 is
+ * equivalent to testing for flags == PAGE_VALID.
+ */
+bool page_check_range(target_ulong start, target_ulong len, int flags);
/**
 * page_check_range_empty:
: !guest_range_valid_untagged(addr, size)) {
return false;
}
- return page_check_range((target_ulong)addr, size, type) == 0;
+ return page_check_range((target_ulong)addr, size, type);
}
static inline bool access_ok(CPUState *cpu, int type,
max = h2g_valid(max - 1) ?
max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
- if (page_check_range(h2g(min), max - min, flags) == -1) {
+ if (!page_check_range(h2g(min), max - min, flags)) {
continue;
}
uint32_t level, uint32_t want)
{
#ifdef CONFIG_USER_ONLY
- return (page_check_range(addr, 1, want) == 0) ? 1 : 0;
+ return page_check_range(addr, 1, want);
#else
int prot, excp;
hwaddr phys;
cpu_mmu_index(env, false));
if (host) {
#ifdef CONFIG_USER_ONLY
- if (page_check_range(addr, offset, PAGE_READ) < 0) {
+ if (!page_check_range(addr, offset, PAGE_READ)) {
vl = i;
goto ProbeSuccess;
}
case ASI_PNFL: /* Primary no-fault LE */
case ASI_SNF: /* Secondary no-fault */
case ASI_SNFL: /* Secondary no-fault LE */
- if (page_check_range(addr, size, PAGE_READ) == -1) {
+ if (!page_check_range(addr, size, PAGE_READ)) {
ret = 0;
break;
}