/*
 * Memory region management for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"

struct tcg_region_tree {
    QemuMutex lock;
    GTree *tree;
    /* padding to avoid false sharing is computed at run-time */
};

/*
 * We divide code_gen_buffer into equally-sized "regions" that TCG threads
 * dynamically allocate from as demand dictates. Given appropriate region
 * sizing, this minimizes flushes even when some TCG threads generate a lot
 * more code than others.
 */
struct tcg_region_state {
    QemuMutex lock;

    /* fields set at init time */
    void *start_aligned;
    void *after_prologue;
    size_t n;
    size_t size;       /* size of one region */
    size_t stride;     /* .size + guard size */
    size_t total_size; /* size of entire buffer, >= n * stride */

    /* fields protected by the lock */
    size_t current;       /* current region index */
    size_t agg_size_full; /* aggregate size of full regions */
};

static struct tcg_region_state region;

/*
 * This is an array of struct tcg_region_tree's, with padding.
 * We use void * to simplify the computation of region_trees[i]; each
 * struct is found every tree_size bytes.
 */
static void *region_trees;
static size_t tree_size;

bool in_code_gen_buffer(const void *p)
{
    /*
     * Much like it is valid to have a pointer to the byte past the
     * end of an array (so long as you don't dereference it), allow
     * a pointer to the byte past the end of the code gen buffer.
     */
    return (size_t)(p - region.start_aligned) <= region.total_size;
}
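
/*
 * Note that the single unsigned comparison above also rejects pointers
 * below the buffer: for p < region.start_aligned the subtraction wraps
 * around to a huge size_t value, which cannot be <= total_size.
 */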

#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
    /* Pass NULL pointers unchanged. */
    if (rw) {
        g_assert(in_code_gen_buffer(rw));
        rw += tcg_splitwx_diff;
    }
    return rw;
}

void *tcg_splitwx_to_rw(const void *rx)
{
    /* Pass NULL pointers unchanged. */
    if (rx) {
        rx -= tcg_splitwx_diff;
        /* Assert that we end with a pointer in the rw region. */
        g_assert(in_code_gen_buffer(rx));
    }
    return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */

/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
    if (ptr >= s->ptr + s->size) {
        return 1;
    } else if (ptr < s->ptr) {
        return -1;
    }
    return 0;
}

static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
    const struct tb_tc *a = ap;
    const struct tb_tc *b = bp;

    /*
     * When both sizes are set, we know this isn't a lookup.
     * This is the most likely case: every TB must be inserted; lookups
     * are a lot less frequent.
     */
    if (likely(a->size && b->size)) {
        if (a->ptr > b->ptr) {
            return 1;
        } else if (a->ptr < b->ptr) {
            return -1;
        }
        /* a->ptr == b->ptr should happen only on deletions */
        g_assert(a->size == b->size);
        return 0;
    }
    /*
     * In a lookup, exactly one of the two .size fields is set to 0.
     * From the glib sources we see that @ap is always the lookup key. However,
     * the docs provide no guarantee, so we just mark this case as likely.
     */
    if (likely(a->size == 0)) {
        return ptr_cmp_tb_tc(a->ptr, b);
    }
    return ptr_cmp_tb_tc(b->ptr, a);
}

static void tcg_region_trees_init(void)
{
    size_t i;

    tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
    region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_init(&rt->lock);
        rt->tree = g_tree_new(tb_tc_cmp);
    }
}

static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
    size_t region_idx;

    /*
     * Like tcg_splitwx_to_rw, with no assert. The pc may come from
     * a signal handler over which the caller has no control.
     */
    if (!in_code_gen_buffer(p)) {
        p -= tcg_splitwx_diff;
        if (!in_code_gen_buffer(p)) {
            return NULL;
        }
    }

    if (p < region.start_aligned) {
        region_idx = 0;
    } else {
        ptrdiff_t offset = p - region.start_aligned;

        if (offset > region.stride * (region.n - 1)) {
            region_idx = region.n - 1;
        } else {
            region_idx = offset / region.stride;
        }
    }
    return region_trees + region_idx * tree_size;
}

void tcg_tb_insert(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_insert(rt->tree, &tb->tc, tb);
    qemu_mutex_unlock(&rt->lock);
}

void tcg_tb_remove(TranslationBlock *tb)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);

    g_assert(rt != NULL);
    qemu_mutex_lock(&rt->lock);
    g_tree_remove(rt->tree, &tb->tc);
    qemu_mutex_unlock(&rt->lock);
}

/*
 * Find the TB 'tb' such that
 * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
 * Return NULL if not found.
 */
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
    struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
    TranslationBlock *tb;
    struct tb_tc s = { .ptr = (void *)tc_ptr };

    if (rt == NULL) {
        return NULL;
    }

    qemu_mutex_lock(&rt->lock);
    tb = g_tree_lookup(rt->tree, &s);
    qemu_mutex_unlock(&rt->lock);
    return tb;
}
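
/*
 * Typical use, as a rough sketch: given a host PC taken from a signal or
 * exception context,
 *
 *     TranslationBlock *tb = tcg_tb_lookup(host_pc);
 *
 * yields the TB whose generated code contains host_pc, or NULL when the
 * address does not lie in translated code.
 */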

static void tcg_region_tree_lock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_lock(&rt->lock);
    }
}

static void tcg_region_tree_unlock_all(void)
{
    size_t i;

    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        qemu_mutex_unlock(&rt->lock);
    }
}

void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, func, user_data);
    }
    tcg_region_tree_unlock_all();
}

size_t tcg_nb_tbs(void)
{
    size_t nb_tbs = 0;
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        nb_tbs += g_tree_nnodes(rt->tree);
    }
    tcg_region_tree_unlock_all();
    return nb_tbs;
}

static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
    TranslationBlock *tb = v;

    tb_destroy(tb);
    return FALSE;
}

static void tcg_region_tree_reset_all(void)
{
    size_t i;

    tcg_region_tree_lock_all();
    for (i = 0; i < region.n; i++) {
        struct tcg_region_tree *rt = region_trees + i * tree_size;

        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
        /* Increment the refcount first so that destroy acts as a reset */
        g_tree_ref(rt->tree);
        g_tree_destroy(rt->tree);
    }
    tcg_region_tree_unlock_all();
}

static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
    void *start, *end;

    start = region.start_aligned + curr_region * region.stride;
    end = start + region.size;

    if (curr_region == 0) {
        start = region.after_prologue;
    }
    /* The final region may have a few extra pages due to earlier rounding. */
    if (curr_region == region.n - 1) {
        end = region.start_aligned + region.total_size;
    }

    *pstart = start;
    *pend = end;
}

static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
    void *start, *end;

    tcg_region_bounds(curr_region, &start, &end);

    s->code_gen_buffer = start;
    s->code_gen_ptr = start;
    s->code_gen_buffer_size = end - start;
    s->code_gen_highwater = end - TCG_HIGHWATER;
}

static bool tcg_region_alloc__locked(TCGContext *s)
{
    if (region.current == region.n) {
        return true;
    }
    tcg_region_assign(s, region.current);
    region.current++;
    return false;
}

/*
 * Request a new region once the one in use has filled up.
 * Returns true on error.
 */
bool tcg_region_alloc(TCGContext *s)
{
    bool err;
    /* read the region size now; alloc__locked will overwrite it on success */
    size_t size_full = s->code_gen_buffer_size;

    qemu_mutex_lock(&region.lock);
    err = tcg_region_alloc__locked(s);
    if (!err) {
        region.agg_size_full += size_full - TCG_HIGHWATER;
    }
    qemu_mutex_unlock(&region.lock);
    return err;
}

/*
 * Perform a context's first region allocation.
 * This function does _not_ increment region.agg_size_full.
 */
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
    bool err = tcg_region_alloc__locked(s);
    g_assert(!err);
}

void tcg_region_initial_alloc(TCGContext *s)
{
    qemu_mutex_lock(&region.lock);
    tcg_region_initial_alloc__locked(s);
    qemu_mutex_unlock(&region.lock);
}

/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;

    qemu_mutex_lock(&region.lock);
    region.current = 0;
    region.agg_size_full = 0;

    for (i = 0; i < n_ctxs; i++) {
        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        tcg_region_initial_alloc__locked(s);
    }
    qemu_mutex_unlock(&region.lock);

    tcg_region_tree_reset_all();
}

static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
    return 1;
#else
    size_t n_regions;

    /*
     * It is likely that some vCPUs will translate more code than others,
     * so we first try to set more regions than max_cpus, with those regions
     * being of reasonable size. If that's not possible we make do by evenly
     * dividing the code_gen_buffer among the vCPUs.
     */
    /* Use a single region if all we have is one vCPU thread */
    if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
        return 1;
    }

    /*
     * Try to have more regions than max_cpus, with each region being >= 2 MB.
     * If we can't, then just allocate one region per vCPU thread.
     */
    n_regions = tb_size / (2 * MiB);
    if (n_regions <= max_cpus) {
        return max_cpus;
    }
    return MIN(n_regions, max_cpus * 8);
#endif
}
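
/*
 * For example: with a 1 GiB buffer and max_cpus == 8 under MTTCG,
 * tb_size / 2 MiB = 512 > max_cpus, so we use MIN(512, 8 * 8) = 64 regions.
 * With a 64 MiB buffer and 32 vCPUs, 32 <= 32 and each vCPU thread gets
 * exactly one region.
 */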

/*
 * Minimum size of the code gen buffer. This number is arbitrary,
 * but not so small that we can't have a fair number of TB's live.
 *
 * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
 * Unless otherwise indicated, this is constrained by the range of
 * direct branches on the host cpu, as used by the TCG implementation
 * of goto_tb.
 */
#define MIN_CODE_GEN_BUFFER_SIZE     (1 * MiB)

#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
 * For user mode on smaller 32 bit systems we may run into trouble
 * allocating big chunks of data in the right place. On these systems
 * we utilise a static code generation buffer directly in the binary.
 */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
 * As user-mode emulation typically means running multiple instances
 * of the translator, don't go too nuts with our default code gen
 * buffer lest we make things too hard for the OS.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
 * We expect most system emulation to run one or two guests per host.
 * Users running large scale system emulation may want to tweak their
 * runtime setup via the tb-size control on the command line.
 */
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer. */
    if (tb_size == 0) {
        size_t phys_mem = qemu_get_host_physmem();
        if (phys_mem == 0) {
            tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        } else {
            tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
        }
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
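
/*
 * For example, on a 64-bit softmmu host with 16 GiB of RAM and no explicit
 * tb-size setting, this yields MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, 2 GiB);
 * with only 2 GiB of RAM it yields 256 MiB. The result is always clamped
 * to [MIN_CODE_GEN_BUFFER_SIZE, MAX_CODE_GEN_BUFFER_SIZE].
 */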

#ifdef __mips__
/*
 * In order to use J and JAL within the code_gen_buffer, we require
 * that the buffer not cross a 256MB boundary.
 */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/*
 * We weren't able to allocate a buffer without crossing that boundary,
 * so make do with the larger portion of the buffer that doesn't cross.
 * Returns the new base and size of the buffer in *obuf and *osize.
 */
static inline void split_cross_256mb(void **obuf, size_t *osize,
                                     void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    *obuf = buf1;
    *osize = size1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
    void *buf, *end;
    size_t size;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    /* page-align the beginning and end of the buffer */
    buf = static_code_gen_buffer;
    end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
    buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
    end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);

    size = end - buf;

    /* Honor a command-line option limiting the size of the buffer. */
    if (size > tb_size) {
        size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        split_cross_256mb(&buf, &size, buf, size);
    }
#endif

    if (qemu_mprotect_rwx(buf, size)) {
        error_setg_errno(errp, errno, "mprotect of jit buffer");
        return -1;
    }
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    region.start_aligned = buf;
    region.total_size = size;

    return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    void *buf;

    if (splitwx > 0) {
        error_setg(errp, "jit split-wx not supported");
        return -1;
    }

    buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
                       PAGE_EXECUTE_READWRITE);
    if (buf == NULL) {
        error_setg_win32(errp, GetLastError(),
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

    region.start_aligned = buf;
    region.total_size = size;

    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
static int alloc_code_gen_buffer_anon(size_t size, int prot,
                                      int flags, Error **errp)
{
    void *buf;

    buf = mmap(NULL, size, prot, flags, -1, 0);
    if (buf == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "allocate %zu bytes for jit buffer", size);
        return -1;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /*
         * Try again, with the original still mapped, to avoid re-acquiring
         * the same 256mb crossing.
         */
        size_t size2;
        void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer. */
                munmap(buf, size);
                break;
            }
            /* Failure.  Work with what we had. */
            munmap(buf2, size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half. */
            split_cross_256mb(&buf2, &size2, buf, size);
            if (buf == buf2) {
                munmap(buf + size2, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Request large pages for the buffer. */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    region.start_aligned = buf;
    region.total_size = size;
    return prot;
}

#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"

static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
    void *buf_rw = NULL, *buf_rx = MAP_FAILED;
    int fd = -1;

#ifdef __mips__
    /* Find space for the RX mapping, vs the 256MiB regions. */
    if (alloc_code_gen_buffer_anon(size, PROT_NONE,
                                   MAP_PRIVATE | MAP_ANONYMOUS |
                                   MAP_NORESERVE, errp) < 0) {
        return -1;
    }
    /* The size of the mapping may have been adjusted. */
    buf_rx = region.start_aligned;
    size = region.total_size;
#endif

    buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
    if (buf_rw == NULL) {
        goto fail;
    }

#ifdef __mips__
    void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
                     MAP_SHARED | MAP_FIXED, fd, 0);
    if (tmp != buf_rx) {
        goto fail_rx;
    }
#else
    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    if (buf_rx == MAP_FAILED) {
        goto fail_rx;
    }
#endif

    close(fd);
    region.start_aligned = buf_rw;
    region.total_size = size;
    tcg_splitwx_diff = buf_rx - buf_rw;

    /* Request large pages for the buffer and the splitwx. */
    qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
    return PROT_READ | PROT_WRITE;

 fail_rx:
    error_setg_errno(errp, errno, "failed to map shared memory for execute");
 fail:
    if (buf_rx != MAP_FAILED) {
        munmap(buf_rx, size);
    }
    if (buf_rw) {
        munmap(buf_rw, size);
    }
    if (fd >= 0) {
        close(fd);
    }
    return -1;
}
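
/*
 * With split-wx the same pages are thus mapped twice: writable at buf_rw
 * and executable at buf_rx, with tcg_splitwx_diff = buf_rx - buf_rw.
 * A rough sketch of how a freshly generated instruction is reached for
 * execution:
 *
 *     void *rw = s->code_ptr;                  // where TCG wrote it
 *     const void *rx = rw + tcg_splitwx_diff;  // where the host runs it
 *
 * which is exactly what tcg_splitwx_to_rx() computes.
 */
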
#endif /* CONFIG_POSIX */

#ifdef CONFIG_DARWIN
#include <mach/mach.h>

extern kern_return_t mach_vm_remap(vm_map_t target_task,
                                   mach_vm_address_t *target_address,
                                   mach_vm_size_t size,
                                   mach_vm_offset_t mask,
                                   int flags,
                                   vm_map_t src_task,
                                   mach_vm_address_t src_address,
                                   boolean_t copy,
                                   vm_prot_t *cur_protection,
                                   vm_prot_t *max_protection,
                                   vm_inherit_t inheritance);

static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
    kern_return_t ret;
    mach_vm_address_t buf_rw, buf_rx;
    vm_prot_t cur_prot, max_prot;

    /* Map the read-write portion via normal anon memory. */
    if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
        return -1;
    }

    buf_rw = (mach_vm_address_t)region.start_aligned;
    buf_rx = 0;
    ret = mach_vm_remap(mach_task_self(),
                        &buf_rx,
                        size,
                        0,
                        VM_FLAGS_ANYWHERE,
                        mach_task_self(),
                        buf_rw,
                        false,
                        &cur_prot,
                        &max_prot,
                        VM_INHERIT_NONE);
    if (ret != KERN_SUCCESS) {
        /* TODO: Convert "ret" to a human readable error message. */
        error_setg(errp, "vm_remap for jit splitwx failed");
        munmap((void *)buf_rw, size);
        return -1;
    }

    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        error_setg_errno(errp, errno, "mprotect for jit splitwx");
        munmap((void *)buf_rx, size);
        munmap((void *)buf_rw, size);
        return -1;
    }

    tcg_splitwx_diff = buf_rx - buf_rw;
    return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */

static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
    return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
    return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
    error_setg(errp, "jit split-wx not supported");
    return -1;
}

static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
    ERRP_GUARD();
    int prot, flags;

    if (splitwx) {
        prot = alloc_code_gen_buffer_splitwx(size, errp);
        if (prot >= 0) {
            return prot;
        }
        /*
         * If splitwx force-on (1), fail;
         * if splitwx default-on (-1), fall through to splitwx off.
         */
        if (splitwx > 0) {
            return -1;
        }
        error_free_or_abort(errp);
    }

    prot = PROT_READ | PROT_WRITE | PROT_EXEC;
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
    /* The tcg interpreter does not need execute permission. */
    prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
    /* Applicable to both iOS and macOS (Apple Silicon). */
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif

    return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

/*
 * Initializes region partitioning.
 *
 * Called at init time from the parent thread (i.e. the one calling
 * tcg_context_init), after the target's TCG globals have been set.
 *
 * Region partitioning works by splitting code_gen_buffer into separate regions,
 * and then assigning regions to TCG threads so that the threads can translate
 * code in parallel without synchronization.
 *
 * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
 * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
 * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
 * must have been parsed before calling this function, since it calls
 * qemu_tcg_mttcg_enabled().
 *
 * In user-mode we use a single region. Having multiple regions in user-mode
 * is not supported, because the number of vCPU threads (recall that each thread
 * spawned by the guest corresponds to a vCPU thread) is only bounded by the
 * OS, and usually this number is huge (tens of thousands is not uncommon).
 * Thus, given this large bound on the number of vCPU threads and the fact
 * that code_gen_buffer is allocated at compile-time, we cannot guarantee
 * the availability of at least one region per vCPU thread.
 *
 * However, this user-mode limitation is unlikely to be a significant problem
 * in practice. Multi-threaded guests share most if not all of their translated
 * code, which makes parallel code generation less appealing than in softmmu.
 */
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
    size_t page_size;
    size_t region_size;
    size_t i;
    int have_prot;

    have_prot = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
                                      splitwx, &error_fatal);
    assert(have_prot >= 0);

    /*
     * Make region_size a multiple of page_size, using the aligned start of
     * the buffer. As a result of this we might end up with a few extra pages
     * at the end of the buffer; we will assign those to the last region.
     */
    region.n = tcg_n_regions(region.total_size, max_cpus);
    page_size = qemu_real_host_page_size;
    region_size = region.total_size / region.n;
    region_size = QEMU_ALIGN_DOWN(region_size, page_size);

    /* A region must have at least 2 pages; one code, one guard */
    g_assert(region_size >= 2 * page_size);
    region.stride = region_size;

    /* Reserve space for guard pages. */
    region.size = region_size - page_size;
    region.total_size -= page_size;

    /*
     * The first region will be smaller than the others, via the prologue,
     * which has yet to be allocated. For now, the first region begins at
     * the aligned start of the buffer.
     */
    region.after_prologue = region.start_aligned;

    /* init the region struct */
    qemu_mutex_init(&region.lock);

    /*
     * Set guard pages in the rw buffer, as that's the one into which
     * buffer overruns could occur. Do not set guard pages in the rx
     * buffer -- let that one use hugepages throughout.
     */
    for (i = 0; i < region.n; i++) {
        void *start, *end;

        tcg_region_bounds(i, &start, &end);

        /*
         * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
         * rejects a permission change from RWX -> NONE. Guard pages are
         * nice for bug detection but are not essential; ignore any failure.
         */
        (void)qemu_mprotect_none(end, page_size);
    }

    tcg_region_trees_init();

    /*
     * Leave the initial context initialized to the first region.
     * This will be the context into which we generate the prologue.
     * It is also the only context for CONFIG_USER_ONLY.
     */
    tcg_region_initial_alloc__locked(&tcg_init_ctx);
}
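
/*
 * Worked example of the sizing above, with illustrative numbers: a 1 GiB
 * buffer split into 64 regions on a host with 4 KiB pages gives
 * region_size = stride = 16 MiB and region.size = 16 MiB - 4 KiB of usable
 * space per region, with one trailing guard page taken off total_size.
 * Region i then spans [start_aligned + i * stride, ... + size), except that
 * region 0 starts after the prologue and the last region absorbs any
 * rounding slack.
 */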

void tcg_region_prologue_set(TCGContext *s)
{
    /* Deduct the prologue from the first region. */
    g_assert(region.start_aligned == s->code_gen_buffer);
    region.after_prologue = s->code_ptr;

    /* Recompute boundaries of the first region. */
    tcg_region_assign(s, 0);

    /* Register the balance of the buffer with gdb. */
    tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
                     region.start_aligned + region.total_size -
                     region.after_prologue);
}

/*
 * Returns the size (in bytes) of all translated code (i.e. from all regions)
 * currently in the cache.
 * See also: tcg_code_capacity()
 * Do not confuse with tcg_current_code_size(); that one applies to a single
 * TCG context.
 */
size_t tcg_code_size(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total;

    qemu_mutex_lock(&region.lock);
    total = region.agg_size_full;
    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
        size_t size;

        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
        g_assert(size <= s->code_gen_buffer_size);
        total += size;
    }
    qemu_mutex_unlock(&region.lock);
    return total;
}

/*
 * Returns the code capacity (in bytes) of the entire cache, i.e. including all
 * regions.
 * See also: tcg_code_size()
 */
size_t tcg_code_capacity(void)
{
    size_t guard_size, capacity;

    /* no need for synchronization; these variables are set at init time */
    guard_size = region.stride - region.size;
    capacity = region.total_size;
    capacity -= (region.n - 1) * guard_size;
    capacity -= region.n * TCG_HIGHWATER;

    return capacity;
}
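
/*
 * Continuing the illustrative numbers from tcg_region_init(): with 64
 * regions and 4 KiB guard pages, capacity = total_size - 63 * 4 KiB
 * (guards between regions) - 64 * TCG_HIGHWATER.
 */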

size_t tcg_tb_phys_invalidate_count(void)
{
    unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
    unsigned int i;
    size_t total = 0;

    for (i = 0; i < n_ctxs; i++) {
        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);

        total += qatomic_read(&s->tb_phys_invalidate_count);
    }
    return total;
}