#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/madvise.h"
+#include "qemu/mprotect.h"
+#include "qemu/memalign.h"
+#include "qemu/cacheinfo.h"
+#include "qemu/qtree.h"
#include "qapi/error.h"
-#include "exec/exec-all.h"
#include "tcg/tcg.h"
+#include "exec/translation-block.h"
#include "tcg-internal.h"
+#include "host/cpuinfo.h"
+/*
+ * Local source-level compatibility with Unix.
+ * Used by tcg_region_init below.
+ */
+#if defined(_WIN32)
+#define PROT_READ 1
+#define PROT_WRITE 2
+#define PROT_EXEC 4
+#endif
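+/*
+ * The values match the Unix ones; actual permission changes on Windows
+ * go through qemu_mprotect_*(), which wraps VirtualProtect, so these
+ * constants only feed the have_prot/need_prot bookkeeping below.
+ */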
+
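+/*
+ * One lock+tree pair exists per region; tcg_region_trees_init() lays the
+ * pairs out with a stride rounded up to the host cache line size (hence
+ * the qemu/cacheinfo.h include), so threads locking different trees do
+ * not false-share a cache line.
+ */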
struct tcg_region_tree {
QemuMutex lock;
- GTree *tree;
+ QTree *tree;
/* padding to avoid false sharing is computed at run-time */
};
return (size_t)(p - region.start_aligned) <= region.total_size;
}
+#ifndef CONFIG_TCG_INTERPRETER
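+/*
+ * Protection bits for the executable mapping: PROT_READ | PROT_EXEC, plus
+ * PROT_BTI on aarch64 Linux hosts with Branch Target Identification, so
+ * that indirect branches into the generated code must land on BTI
+ * instructions.
+ */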
+static int host_prot_read_exec(void)
+{
+#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
+ if (cpuinfo & CPUINFO_BTI) {
+ return PROT_READ | PROT_EXEC | PROT_BTI;
+ }
+#endif
+ return PROT_READ | PROT_EXEC;
+}
+#endif
+
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
return 0;
}
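+/*
+ * q_tree_new_full() takes a GCompareDataFunc rather than the GCompareFunc
+ * used by g_tree_new(), hence the extra, unused, userdata parameter.
+ */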
-static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
+static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp, gpointer userdata)
{
const struct tb_tc *a = ap;
const struct tb_tc *b = bp;
return ptr_cmp_tb_tc(b->ptr, a);
}
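+/*
+ * GDestroyNotify for the tree values: q_tree_remove() and q_tree_destroy()
+ * run this for every TranslationBlock dropped from a tree, so each TB's
+ * jmp_lock is destroyed exactly once.
+ */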
+static void tb_destroy(gpointer value)
+{
+ TranslationBlock *tb = value;
+ qemu_spin_destroy(&tb->jmp_lock);
+}
+
static void tcg_region_trees_init(void)
{
size_t i;
struct tcg_region_tree *rt = region_trees + i * tree_size;
qemu_mutex_init(&rt->lock);
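+        /*
+         * The key, &tb->tc, points into the value, so only a value
+         * destroy notifier is needed.
+         */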
- rt->tree = g_tree_new(tb_tc_cmp);
+ rt->tree = q_tree_new_full(tb_tc_cmp, NULL, NULL, tb_destroy);
}
}
g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
- g_tree_insert(rt->tree, &tb->tc, tb);
+ q_tree_insert(rt->tree, &tb->tc, tb);
qemu_mutex_unlock(&rt->lock);
}
g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
- g_tree_remove(rt->tree, &tb->tc);
+ q_tree_remove(rt->tree, &tb->tc);
qemu_mutex_unlock(&rt->lock);
}
}
qemu_mutex_lock(&rt->lock);
- tb = g_tree_lookup(rt->tree, &s);
+ tb = q_tree_lookup(rt->tree, &s);
qemu_mutex_unlock(&rt->lock);
return tb;
}
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
- g_tree_foreach(rt->tree, func, user_data);
+ q_tree_foreach(rt->tree, func, user_data);
}
tcg_region_tree_unlock_all();
}
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
- nb_tbs += g_tree_nnodes(rt->tree);
+ nb_tbs += q_tree_nnodes(rt->tree);
}
tcg_region_tree_unlock_all();
return nb_tbs;
}
-static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
-{
- TranslationBlock *tb = v;
-
- tb_destroy(tb);
- return FALSE;
-}
-
static void tcg_region_tree_reset_all(void)
{
size_t i;
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
- g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
/* Increment the refcount first so that destroy acts as a reset */
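+        /*
+         * q_tree_destroy() empties the tree, running tb_destroy on every
+         * value, and drops one reference; the extra reference taken here
+         * keeps the now-empty tree alive for reuse.
+         */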
- g_tree_ref(rt->tree);
- g_tree_destroy(rt->tree);
+ q_tree_ref(rt->tree);
+ q_tree_destroy(rt->tree);
}
tcg_region_tree_unlock_all();
}
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
-#ifdef __mips__
-/*
- * In order to use J and JAL within the code_gen_buffer, we require
- * that the buffer not cross a 256MB boundary.
- */
-static inline bool cross_256mb(void *addr, size_t size)
-{
- return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
-}
-
-/*
- * We weren't able to allocate a buffer without crossing that boundary,
- * so make do with the larger portion of the buffer that doesn't cross.
- * Returns the new base and size of the buffer in *obuf and *osize.
- */
-static inline void split_cross_256mb(void **obuf, size_t *osize,
- void *buf1, size_t size1)
-{
- void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
- size_t size2 = buf1 + size1 - buf2;
-
- size1 = buf2 - buf1;
- if (size1 < size2) {
- size1 = size2;
- buf1 = buf2;
- }
-
- *obuf = buf1;
- *osize = size1;
-}
-#endif
-
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
/* page-align the beginning and end of the buffer */
buf = static_code_gen_buffer;
end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
- buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
- end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
+ buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size());
+ end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size());
size = end - buf;
/* Honor a command-line option limiting the size of the buffer. */
if (size > tb_size) {
- size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
- }
-
-#ifdef __mips__
- if (cross_256mb(buf, size)) {
- split_cross_256mb(&buf, &size, buf, size);
- }
-#endif
-
- if (qemu_mprotect_rwx(buf, size)) {
- error_setg_errno(errp, errno, "mprotect of jit buffer");
- return false;
+ size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size());
}
region.start_aligned = buf;
region.total_size = size;
- return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    return PROT_READ | PROT_WRITE;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
return -1;
}
-#ifdef __mips__
- if (cross_256mb(buf, size)) {
- /*
- * Try again, with the original still mapped, to avoid re-acquiring
- * the same 256mb crossing.
- */
- size_t size2;
- void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
- switch ((int)(buf2 != MAP_FAILED)) {
- case 1:
- if (!cross_256mb(buf2, size)) {
- /* Success! Use the new buffer. */
- munmap(buf, size);
- break;
- }
- /* Failure. Work with what we had. */
- munmap(buf2, size);
- /* fallthru */
- default:
- /* Split the original buffer. Free the smaller half. */
- split_cross_256mb(&buf2, &size2, buf, size);
- if (buf == buf2) {
- munmap(buf + size2, size - size2);
- } else {
- munmap(buf, size - size2);
- }
- size = size2;
- break;
- }
- buf = buf2;
- }
-#endif
-
region.start_aligned = buf;
region.total_size = size;
return prot;
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"
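+/*
+ * Split-wx via memfd: map the same file twice, once PROT_READ|PROT_WRITE
+ * for the translator and once with host_prot_read_exec() for execution.
+ * The two views alias the same pages, so memory that is simultaneously
+ * writable and executable never exists; code stored through the rw view
+ * is immediately runnable at the rx alias (see tcg_splitwx_to_rx).
+ */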
-static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
+static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
void *buf_rw = NULL, *buf_rx = MAP_FAILED;
int fd = -1;
-#ifdef __mips__
- /* Find space for the RX mapping, vs the 256MiB regions. */
- if (alloc_code_gen_buffer_anon(size, PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS |
- MAP_NORESERVE, errp) < 0) {
- return false;
- }
- /* The size of the mapping may have been adjusted. */
- buf_rx = region.start_aligned;
- size = region.total_size;
-#endif
-
buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
if (buf_rw == NULL) {
goto fail;
}
-#ifdef __mips__
- void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
- MAP_SHARED | MAP_FIXED, fd, 0);
- if (tmp != buf_rx) {
- goto fail_rx;
- }
-#else
- buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+ buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
if (buf_rx == MAP_FAILED) {
goto fail_rx;
}
-#endif
close(fd);
region.start_aligned = buf_rw;
return -1;
}
- if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
+ if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
error_setg_errno(errp, errno, "mprotect for jit splitwx");
munmap((void *)buf_rx, size);
munmap((void *)buf_rw, size);
error_free_or_abort(errp);
}
- prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ /*
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
+ * rejects a permission change from RWX -> NONE when reserving the
+ * guard pages later. We can go the other way with the same number
+ * of syscalls, so always begin with PROT_NONE.
+ */
+ prot = PROT_NONE;
flags = MAP_PRIVATE | MAP_ANONYMOUS;
-#ifdef CONFIG_TCG_INTERPRETER
- /* The tcg interpreter does not need execute permission. */
- prot = PROT_READ | PROT_WRITE;
-#elif defined(CONFIG_DARWIN)
+#ifdef CONFIG_DARWIN
/* Applicable to both iOS and macOS (Apple Silicon). */
if (!splitwx) {
flags |= MAP_JIT;
* and then assigning regions to TCG threads so that the threads can translate
* code in parallel without synchronization.
*
- * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
+ * In system-mode the number of TCG threads is bounded by max_cpus, so we use at
* least max_cpus regions in MTTCG. In !MTTCG we use a single region.
* Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
* must have been parsed before calling this function, since it calls
*
* However, this user-mode limitation is unlikely to be a significant problem
* in practice. Multi-threaded guests share most if not all of their translated
- * code, which makes parallel code generation less appealing than in softmmu.
+ * code, which makes parallel code generation less appealing than in system-mode.
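+ *
+ * Each region is followed by a guard page, so a translator that overruns
+ * its region faults immediately instead of corrupting the code of a
+ * neighbouring region.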
*/
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
- const size_t page_size = qemu_real_host_page_size;
+ const size_t page_size = qemu_real_host_page_size();
size_t region_size;
- size_t i;
- int have_prot;
+ int have_prot, need_prot;
/* Size the buffer. */
if (tb_size == 0) {
* Set guard pages in the rw buffer, as that's the one into which
* buffer overruns could occur. Do not set guard pages in the rx
* buffer -- let that one use hugepages throughout.
+ * Work with the page protections set up with the initial mapping.
*/
- for (i = 0; i < region.n; i++) {
+ need_prot = PROT_READ | PROT_WRITE;
+#ifndef CONFIG_TCG_INTERPRETER
+ if (tcg_splitwx_diff == 0) {
+ need_prot |= host_prot_read_exec();
+ }
+#endif
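+    /*
+     * For example: the anonymous-mmap path returns PROT_NONE from
+     * alloc_code_gen_buffer, so each region below is raised to need_prot;
+     * the static buffer starts out read-write and only needs PROT_EXEC
+     * added when split-wx is not in use.
+     */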
+ for (size_t i = 0, n = region.n; i < n; i++) {
void *start, *end;
tcg_region_bounds(i, &start, &end);
+ if (have_prot != need_prot) {
+ int rc;
- /*
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
- * rejects a permission change from RWX -> NONE. Guard pages are
- * nice for bug detection but are not essential; ignore any failure.
- */
- (void)qemu_mprotect_none(end, page_size);
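+            /*
+             * qemu_mprotect_rwx/rw cover the portable cases (on Windows
+             * they wrap VirtualProtect); any other combination, e.g. RWX
+             * plus PROT_BTI, arises only on POSIX hosts where a plain
+             * mprotect is available.
+             */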
+ if (need_prot == (PROT_READ | PROT_WRITE | PROT_EXEC)) {
+ rc = qemu_mprotect_rwx(start, end - start);
+ } else if (need_prot == (PROT_READ | PROT_WRITE)) {
+ rc = qemu_mprotect_rw(start, end - start);
+ } else {
+#ifdef CONFIG_POSIX
+ rc = mprotect(start, end - start, need_prot);
+#else
+ g_assert_not_reached();
+#endif
+ }
+ if (rc) {
+ error_setg_errno(&error_fatal, errno,
+ "mprotect of jit buffer");
+ }
+ }
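+        /*
+         * If the initial mapping was PROT_NONE, the guard page at @end was
+         * never made accessible by the loop above, so it is already a
+         * guard; only an initially-accessible mapping needs the explicit
+         * downgrade to none.
+         */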
+ if (have_prot != 0) {
+ /* Guard pages are nice for bug detection but are not essential. */
+ (void)qemu_mprotect_none(end, page_size);
+ }
}
tcg_region_trees_init();
return capacity;
}
-
-size_t tcg_tb_phys_invalidate_count(void)
-{
- unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
- unsigned int i;
- size_t total = 0;
-
- for (i = 0; i < n_ctxs; i++) {
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
-
- total += qatomic_read(&s->tb_phys_invalidate_count);
- }
- return total;
-}