/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"

#include "qemu/cutils.h"

#include "exec/exec-all.h"
#include "exec/target_page.h"

#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <linux/falloc.h>
#endif

#endif
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)
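
/* Illustrative note (not from the original file): these flags are combined
 * as a bitmask in RAMBlock::flags, e.g. a resizeable block backed by a
 * shared mapping carries RAM_SHARED | RAM_RESIZEABLE; qemu_ram_is_shared()
 * further down simply tests the RAM_SHARED bit.
 */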
#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger.  And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}
#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
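
/* Worked example (illustrative, assuming P_L2_BITS == 9 and 4 KiB target
 * pages, i.e. TARGET_PAGE_BITS == 12): each level indexes 512 entries and
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, so the 52-bit page frame
 * number is consumed nine bits at a time by a six-level radix tree. A leaf
 * entry (skip == 0) points into phys_sections; an inner entry points into
 * phys_map_nodes and records in 'skip' how many levels a lookup may jump
 * once the tree has been compacted.
 */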
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    FlatView *fv;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

void address_space_dispatch_compact(AddressSpaceDispatch *d)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
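
/* Example of the effect (illustrative): if every inner node along a path
 * has exactly one valid child, compaction folds the chain into the root
 * entry's skip count, so phys_page_find() below subtracts lp.skip from its
 * level counter and follows one pointer instead of dereferencing each
 * intermediate node in turn.
 */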
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
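
/* Worked case (illustrative): a section with offset_within_address_space
 * == 0 and size == 2^64 cannot represent that size in 64 bits, so
 * int128_gethi(section->size) != 0 and every address is covered; only
 * sections smaller than the full address space need the explicit
 * range_covers_byte() check.
 */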
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
{
    PhysPageEntry lp = d->phys_map, *p;
    Node *nodes = d->map.nodes;
    MemoryRegionSection *sections = d->map.sections;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;

    if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
        !section_covers_addr(section, addr)) {
        section = phys_page_find(d, addr);
        atomic_set(&d->mru_section, section);
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * to be only within the same memory region; otherwise we wouldn't be
     * able to issue the full-width access.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
/**
 * flatview_do_translate - translate an address in FlatView
 *
 * @fv: the flat view that we want to translate on
 * @addr: the address to be translated in above address space
 * @xlat: the translated address offset within memory region. It
 *        can be @NULL when we don't care about it.
 * @plen_out: valid read/write length of the translated address. It
 *            can be @NULL when we don't care about it.
 * @page_mask_out: page mask for the translated address. This
 *                 should only be meaningful for IOMMU translated
 *                 addresses, since there may be huge pages that this bit
 *                 would tell. It can be @NULL if we don't care about it.
 * @is_write: whether the translation operation is for write
 * @is_mmio: whether this can be MMIO, set true if it can
 *
 * This function is called from RCU critical section
 */
static MemoryRegionSection flatview_do_translate(FlatView *fv,
                                                 hwaddr addr,
                                                 hwaddr *xlat,
                                                 hwaddr *plen_out,
                                                 hwaddr *page_mask_out,
                                                 bool is_write,
                                                 bool is_mmio,
                                                 AddressSpace **target_as)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    IOMMUMemoryRegion *iommu_mr;
    IOMMUMemoryRegionClass *imrc;
    hwaddr page_mask = (hwaddr)(-1);
    hwaddr plen = (hwaddr)(-1);

    if (plen_out) {
        plen = *plen_out;
    }

    for (;;) {
        section = address_space_translate_internal(
                flatview_to_dispatch(fv), addr, &addr,
                &plen, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
        if (!iommu_mr) {
            break;
        }
        imrc = memory_region_get_iommu_class_nocheck(iommu_mr);

        iotlb = imrc->translate(iommu_mr, addr, is_write ?
                                IOMMU_WO : IOMMU_RO);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        page_mask &= iotlb.addr_mask;
        plen = MIN(plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            goto translate_fail;
        }

        fv = address_space_to_flatview(iotlb.target_as);
        *target_as = iotlb.target_as;
    }

    *xlat = addr;

    if (page_mask == (hwaddr)(-1)) {
        /* Not behind an IOMMU, use default page size. */
        page_mask = ~TARGET_PAGE_MASK;
    }

    if (page_mask_out) {
        *page_mask_out = page_mask;
    }

    if (plen_out) {
        *plen_out = plen;
    }

    return *section;

translate_fail:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}
/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    MemoryRegionSection section;
    hwaddr xlat, page_mask;

    /*
     * This can never be MMIO, and we don't really care about plen,
     * but page mask.
     */
    section = flatview_do_translate(address_space_to_flatview(as), addr, &xlat,
                                    NULL, &page_mask, is_write, false, &as);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
        section.offset_within_region;

    return (IOMMUTLBEntry) {
        .target_as = as,
        .iova = addr & ~page_mask,
        .translated_addr = xlat & ~page_mask,
        .addr_mask = page_mask,
        /* IOTLBs are for DMA, and DMA is only allowed on RAM. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}
/* Called from RCU critical section */
MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool is_write)
{
    MemoryRegion *mr;
    MemoryRegionSection section;
    AddressSpace *as = NULL;

    /* This can be MMIO, so set the MMIO bit. */
    section = flatview_do_translate(fv, addr, xlat, plen, NULL,
                                    is_write, true, &as);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!memory_region_is_iommu(section->mr));
    return section;
}
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr)
{
    CPUAddressSpace *newas;
    AddressSpace *as = g_new0(AddressSpace, 1);

    address_space_init(as, mr, prefix);

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

Property cpu_common_props[] = {
#ifndef CONFIG_USER_ONLY
    /* Create a memory property for softmmu CPU object,
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    static bool tcg_target_initialized;

    cpu_list_add(cpu);

    if (tcg_enabled() && !tcg_target_initialized) {
        tcg_target_initialized = true;
        cc->tcg_initialize();
    }

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    /* Locks grabbed by tb_invalidate_phys_addr */
    tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                            phys | (pc & ~TARGET_PAGE_MASK));
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
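
/* Worked example (illustrative, 64-bit vaddr): for a watchpoint with
 * vaddr == 0xfffffffffffffffc and len == 4, wpend is 0xffffffffffffffff
 * rather than the wrapped sum wp->vaddr + wp->len == 0, so an access at
 * addr == 0xfffffffffffffffe with len == 2 is correctly reported as
 * overlapping, which a naive addr < wp->vaddr + wp->len test would miss.
 */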
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (ram_addr_t start, ram_addr_t length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end   = last;

    page = first >> TARGET_PAGE_BITS;
    end  = last  >> TARGET_PAGE_BITS;
    dest = 0;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
        assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL)));
        offset >>= BITS_PER_LEVEL;

        bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                     blocks->blocks[idx] + offset,
                                     num);
        page += num;
        dest += num >> BITS_PER_LEVEL;
    }

    rcu_read_unlock();

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return snap;
}
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = flatview_to_dispatch(section->fv);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
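
/* Illustrative note: the assert above is what makes the iotlb encoding in
 * memory_region_section_get_iotlb() work; a section number is always
 * smaller than TARGET_PAGE_SIZE, so ORing it into a page-aligned value
 * (e.g. iotlb |= PHYS_SECTION_NOTDIRTY for writable RAM) never disturbs
 * the aligned part, and the two components can be separated again with
 * TARGET_PAGE_MASK.
 */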
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
}
static void register_subpage(FlatView *fv, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(fv, base);
        subsection.fv = fv;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(FlatView *fv,
                               MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
void flatview_add_to_dispatch(FlatView *fv, MemoryRegionSection *section)
{
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(fv, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(fv, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(fv, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(fv, &now);
        }
    }
}
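
/* Worked example (illustrative, 4 KiB target pages): registering a section
 * covering [0x1800, 0x5200) proceeds as
 *   head:   [0x1800, 0x2000) -> register_subpage()   (unaligned start)
 *   middle: [0x2000, 0x5000) -> register_multipage() (whole pages)
 *   tail:   [0x5000, 0x5200) -> register_subpage()   (partial page)
 * so only the partial pages pay the extra indirection of a subpage_t.
 */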
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

void ram_block_dump(Monitor *mon)
{
    RAMBlock *block;
    char *psize;

    rcu_read_lock();
    monitor_printf(mon, "%24s %8s %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
        g_free(psize);
    }
    rcu_read_unlock();
}
#ifdef __linux__
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them.  Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
    char *mem_path;
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        mem_path = object_property_get_str(obj, "mem-path", NULL);
        if (mem_path) {
            long hpsize = qemu_mempath_getpagesize(mem_path);
            if (hpsize < *hpsize_min) {
                *hpsize_min = hpsize;
            }
        } else {
            *hpsize_min = getpagesize();
        }
    }

    return 0;
}
long qemu_getrampagesize(void)
{
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;

    if (mem_path) {
        mainrampagesize = qemu_mempath_getpagesize(mem_path);
    } else {
        mainrampagesize = getpagesize();
    }

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
    if (memdev_root) {
        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
    }
    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;
    }

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
        static bool warned;
        if (!warned) {
            error_report("Huge page support disabled (n/a for main memory).");
            warned = true;
        }
        return mainrampagesize;
    }

    return hpsize;
}
#else
long qemu_getrampagesize(void)
{
    return getpagesize();
}
#endif

#ifdef __linux__
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}
static int file_ram_open(const char *path,
                         const char *region_name,
                         bool *created,
                         Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    int fd = -1;

    *created = false;
    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(region_name);
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            return -1;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    return fd;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            int fd,
                            bool truncate,
                            Error **errp)
{
    void *area;

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        return NULL;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (truncate && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        return NULL;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, smp_cpus, errp);
        if (errp && *errp) {
            qemu_ram_munmap(area, memory);
            return NULL;
        }
    }

    block->fd = fd;
    return area;
}
#endif
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
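
/* Worked example (illustrative): with blocks at [0x0, 0x40000000) and
 * [0x80000000, 0xc0000000), a request for 0x20000000 bytes sees two
 * candidate gaps: [0x40000000, 0x80000000) and the unbounded space after
 * 0xc0000000. Both fit, but the loop keeps the smallest gap that fits, so
 * the new block lands at offset 0x40000000.
 */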
unsigned long last_ram_page(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last >> TARGET_PAGE_BITS;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}
size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
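
/* Note on the RCU pattern above (illustrative): a reader that races with an
 * extension sees either the old or the new DirtyMemoryBlocks. The per-block
 * bitmap pointers are copied, not reallocated, so a stale reader still
 * dereferences valid bitmaps, and the old array itself is only freed after
 * a grace period via g_free_rcu().
 */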
static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_page();

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        ram_block_notify_add(new_block->host, new_block->max_length);
    }
}
#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 bool share, int fd,
                                 Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;
    int64_t file_size;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    file_size = get_file_size(fd);
    if (file_size > 0 && file_size < size) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   mem_path, file_size, size);
        return NULL;
    }

    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    int fd;
    bool created;
    RAMBlock *block;

    fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp);
    if (fd < 0) {
        return NULL;
    }

    block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    if (!block) {
        if (created) {
            unlink(mem_path);
        }
        close(fd);
        return NULL;
    }

    return block;
}
#endif
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
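
/* Minimal usage sketch (illustrative, not from the original file): device
 * models normally reach these allocators through the memory region API,
 * e.g. memory_region_init_ram(), rather than calling them directly. Called
 * directly, the fixed-size variant looks like:
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc(16 * 1024 * 1024, mr, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 *
 * while qemu_ram_alloc_resizeable() additionally takes a maximum size and a
 * resized() callback that fires from qemu_ram_resize() above.
 */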
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    if (block->host) {
        ram_block_notify_remove(block->host, block->max_length);
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    RAMBLOCK_FOREACH(block) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0, false);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, false);
    }
    return ramblock_ptr(block, addr);
}
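
/* Usage sketch (illustrative): a device that owns its RAM can do
 *
 *     rcu_read_lock();
 *     void *host = qemu_map_ram_ptr(block, offset);
 *     memcpy(host, data, len);
 *     rcu_read_unlock();
 *
 * but, as the comment above warns, general-purpose DMA must instead use
 * address_space_map()/address_space_rw() so that MMIO and dirty tracking
 * are honoured.
 */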
/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size, bool lock)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, lock, lock);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, lock);
    }

    return ramblock_ptr(block, addr);
}
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case occurs when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
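
/* Round-trip example (illustrative): for a pointer p obtained from
 * qemu_map_ram_ptr(block, off), qemu_ram_block_from_host(p, false, &o)
 * yields the same block with o == off, and qemu_ram_addr_from_host(p)
 * returns block->offset + off; a pointer lying in no RAMBlock yields
 * RAM_ADDR_INVALID instead.
 */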
/* Called within RCU critical section. */
void memory_notdirty_write_prepare(NotDirtyInfo *ndi,
                                   CPUState *cpu,
                                   vaddr mem_vaddr,
                                   ram_addr_t ram_addr,
                                   unsigned size)
{
    ndi->cpu = cpu;
    ndi->ram_addr = ram_addr;
    ndi->mem_vaddr = mem_vaddr;
    ndi->size = size;
    ndi->locked = false;

    assert(tcg_enabled());
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        ndi->locked = true;
        tb_lock();
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
}

/* Called within RCU critical section. */
void memory_notdirty_write_complete(NotDirtyInfo *ndi)
{
    if (ndi->locked) {
        tb_unlock();
    }

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ndi->ram_addr, ndi->size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ndi->ram_addr)) {
        tlb_set_dirty(ndi->cpu, ndi->mem_vaddr);
    }
}
/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    NotDirtyInfo ndi;

    memory_notdirty_write_prepare(&ndi, current_cpu, current_cpu->mem_io_vaddr,
                                  ram_addr, size);

    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 8:
        stq_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    memory_notdirty_write_complete(&ndi);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    target_ulong vaddr;
    CPUWatchpoint *wp;

    assert(tcg_enabled());
    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                /* Both tb_lock and iothread_mutex will be reset when
                 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
                 * back into the cpu_exec main loop.
                 */
                tb_lock();
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    /* Force execution of one insn next time.  */
                    cpu->cflags_next_tb = 1 | curr_cflags();
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
2506 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2507 so these check for a hit then pass through to the normal out-of-line
2509 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2510 unsigned size, MemTxAttrs attrs)
2514 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2515 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2517 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2520 data = address_space_ldub(as, addr, attrs, &res);
2523 data = address_space_lduw(as, addr, attrs, &res);
2526 data = address_space_ldl(as, addr, attrs, &res);
2529 data = address_space_ldq(as, addr, attrs, &res);
2537 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2538 uint64_t val, unsigned size,
2542 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2543 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
2545 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2548 address_space_stb(as, addr, val, attrs, &res);
2551 address_space_stw(as, addr, val, attrs, &res);
2554 address_space_stl(as, addr, val, attrs, &res);
2557 address_space_stq(as, addr, val, attrs, &res);
2564 static const MemoryRegionOps watch_mem_ops = {
2565 .read_with_attrs = watch_mem_read,
2566 .write_with_attrs = watch_mem_write,
2567 .endianness = DEVICE_NATIVE_ENDIAN,
2569 .min_access_size = 1,
2570 .max_access_size = 8,
2574 .min_access_size = 1,
2575 .max_access_size = 8,
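/* Illustrative sketch, not part of the original file: a watchpoint that the
 * routines above end up servicing.  cpu_watchpoint_insert() is the real API;
 * the guest address and length are made up.
 */
static void __attribute__((unused)) example_insert_watchpoint(CPUState *cpu)
{
    /* Trap 4-byte writes at guest vaddr 0x1000; the TLB tricks then route
     * such accesses through watch_mem_write() -> check_watchpoint().
     */
    cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_WRITE | BP_GDB, NULL);
}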
2580 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
2581 const uint8_t *buf, int len);
2582 static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
2585 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2586 unsigned len, MemTxAttrs attrs)
2588 subpage_t *subpage = opaque;
2592 #if defined(DEBUG_SUBPAGE)
2593 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2594 subpage, len, addr);
2596 res = flatview_read(subpage->fv, addr + subpage->base, attrs, buf, len);
2602 *data = ldub_p(buf);
2605 *data = lduw_p(buf);
2618 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2619 uint64_t value, unsigned len, MemTxAttrs attrs)
2621 subpage_t *subpage = opaque;
2624 #if defined(DEBUG_SUBPAGE)
2625 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2626 " value %"PRIx64"\n",
2627 __func__, subpage, len, addr, value);
2645 return flatview_write(subpage->fv, addr + subpage->base, attrs, buf, len);
2648 static bool subpage_accepts(void *opaque, hwaddr addr,
2649 unsigned len, bool is_write)
2651 subpage_t *subpage = opaque;
2652 #if defined(DEBUG_SUBPAGE)
2653 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2654 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2657 return flatview_access_valid(subpage->fv, addr + subpage->base,
2661 static const MemoryRegionOps subpage_ops = {
2662 .read_with_attrs = subpage_read,
2663 .write_with_attrs = subpage_write,
2664 .impl.min_access_size = 1,
2665 .impl.max_access_size = 8,
2666 .valid.min_access_size = 1,
2667 .valid.max_access_size = 8,
2668 .valid.accepts = subpage_accepts,
2669 .endianness = DEVICE_NATIVE_ENDIAN,
2672 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2677 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2679 idx = SUBPAGE_IDX(start);
2680 eidx = SUBPAGE_IDX(end);
2681 #if defined(DEBUG_SUBPAGE)
2682 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2683 __func__, mmio, start, end, idx, eidx, section);
2685 for (; idx <= eidx; idx++) {
2686 mmio->sub_section[idx] = section;
2692 static subpage_t *subpage_init(FlatView *fv, hwaddr base)
2696 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
2699 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2700 NULL, TARGET_PAGE_SIZE);
2701 mmio->iomem.subpage = true;
2702 #if defined(DEBUG_SUBPAGE)
2703 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2704 mmio, base, TARGET_PAGE_SIZE);
2706 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2711 static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
2714 MemoryRegionSection section = {
2717 .offset_within_address_space = 0,
2718 .offset_within_region = 0,
2719 .size = int128_2_64(),
2722 return phys_section_add(map, &section);
2725 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
2727 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2728 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
2729 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2730 MemoryRegionSection *sections = d->map.sections;
2732 return sections[index & ~TARGET_PAGE_MASK].mr;
2735 static void io_mem_init(void)
2737 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2738 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2741 /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
2742 * which can be called without the iothread mutex.
2744 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2746 memory_region_clear_global_locking(&io_mem_notdirty);
2748 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2752 AddressSpaceDispatch *address_space_dispatch_new(FlatView *fv)
2754 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2757 n = dummy_section(&d->map, fv, &io_mem_unassigned);
2758 assert(n == PHYS_SECTION_UNASSIGNED);
2759 n = dummy_section(&d->map, fv, &io_mem_notdirty);
2760 assert(n == PHYS_SECTION_NOTDIRTY);
2761 n = dummy_section(&d->map, fv, &io_mem_rom);
2762 assert(n == PHYS_SECTION_ROM);
2763 n = dummy_section(&d->map, fv, &io_mem_watch);
2764 assert(n == PHYS_SECTION_WATCH);
2766 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2771 void address_space_dispatch_free(AddressSpaceDispatch *d)
2773 phys_sections_free(&d->map);
2777 static void tcg_commit(MemoryListener *listener)
2779 CPUAddressSpace *cpuas;
2780 AddressSpaceDispatch *d;
2782 /* since each CPU stores ram addresses in its TLB cache, we must
2783 reset the modified entries */
2784 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2785 cpu_reloading_memory_map();
2786 /* The CPU and TLB are protected by the iothread lock.
2787 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2788 * may have split the RCU critical section.
2790 d = address_space_to_dispatch(cpuas->as);
2791 atomic_rcu_set(&cpuas->memory_dispatch, d);
2792 tlb_flush(cpuas->cpu);
2795 static void memory_map_init(void)
2797 system_memory = g_malloc(sizeof(*system_memory));
2799 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2800 address_space_init(&address_space_memory, system_memory, "memory");
2802 system_io = g_malloc(sizeof(*system_io));
2803 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2805 address_space_init(&address_space_io, system_io, "I/O");
2808 MemoryRegion *get_system_memory(void)
2810 return system_memory;
2813 MemoryRegion *get_system_io(void)
2818 #endif /* !defined(CONFIG_USER_ONLY) */
2820 /* physical memory access (slow version, mainly for debug) */
2821 #if defined(CONFIG_USER_ONLY)
2822 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2823 uint8_t *buf, int len, int is_write)
2830 page = addr & TARGET_PAGE_MASK;
2831 l = (page + TARGET_PAGE_SIZE) - addr;
2834 flags = page_get_flags(page);
2835 if (!(flags & PAGE_VALID))
2838 if (!(flags & PAGE_WRITE))
2840 /* XXX: this code should not depend on lock_user */
2841 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2844 unlock_user(p, addr, l);
2846 if (!(flags & PAGE_READ))
2848 /* XXX: this code should not depend on lock_user */
2849 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2852 unlock_user(p, addr, 0);
2863 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2866 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2867 addr += memory_region_get_ram_addr(mr);
2869 /* No early return if dirty_log_mask is or becomes 0, because
2870 * cpu_physical_memory_set_dirty_range will still call
2871 * xen_modified_memory.
2873 if (dirty_log_mask) {
2875 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2877 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2878 assert(tcg_enabled());
2880 tb_invalidate_phys_range(addr, addr + length);
2882 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2884 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2887 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2889 unsigned access_size_max = mr->ops->valid.max_access_size;
2891 /* Regions are assumed to support 1-4 byte accesses unless
2892 otherwise specified. */
2893 if (access_size_max == 0) {
2894 access_size_max = 4;
2897 /* Bound the maximum access by the alignment of the address. */
2898 if (!mr->ops->impl.unaligned) {
2899 unsigned align_size_max = addr & -addr;
2900 if (align_size_max != 0 && align_size_max < access_size_max) {
2901 access_size_max = align_size_max;
2905 /* Don't attempt accesses larger than the maximum. */
2906 if (l > access_size_max) {
2907 l = access_size_max;
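/* Illustrative sketch, not part of the original file: what the alignment
 * clamp above computes.  'addr & -addr' isolates the lowest set bit of the
 * address, so a region allowing 4-byte accesses but addressed at 0x1002 is
 * still limited to 2-byte chunks.  The values are made up.
 */
static unsigned __attribute__((unused)) example_align_clamp(hwaddr addr,
                                                            unsigned max)
{
    unsigned align = addr & -addr;  /* 0x1002 -> 2, 0x1004 -> 4, 0x1000 -> 0x1000 */

    if (align != 0 && align < max) {
        max = align;
    }
    return max;                     /* example_align_clamp(0x1002, 4) == 2 */
}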
2914 static bool prepare_mmio_access(MemoryRegion *mr)
2916 bool unlocked = !qemu_mutex_iothread_locked();
2917 bool release_lock = false;
2919 if (unlocked && mr->global_locking) {
2920 qemu_mutex_lock_iothread();
2922 release_lock = true;
2924 if (mr->flush_coalesced_mmio) {
2926 qemu_mutex_lock_iothread();
2928 qemu_flush_coalesced_mmio_buffer();
2930 qemu_mutex_unlock_iothread();
2934 return release_lock;
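/* Illustrative sketch, not part of the original file: the locking pattern
 * every MMIO dispatch site below follows - take the BQL only if the region
 * needs it, and release it afterwards only if we took it here.
 */
static void __attribute__((unused)) example_locked_dispatch(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t val)
{
    bool release_lock = prepare_mmio_access(mr);

    memory_region_dispatch_write(mr, addr, val, 4, MEMTXATTRS_UNSPECIFIED);
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
}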
2937 /* Called within RCU critical section. */
2938 static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr,
2941 int len, hwaddr addr1,
2942 hwaddr l, MemoryRegion *mr)
2946 MemTxResult result = MEMTX_OK;
2947 bool release_lock = false;
2950 if (!memory_access_is_direct(mr, true)) {
2951 release_lock |= prepare_mmio_access(mr);
2952 l = memory_access_size(mr, l, addr1);
2953 /* XXX: could force current_cpu to NULL to avoid potential bugs */
2957 /* 64 bit write access */
2959 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2963 /* 32 bit write access */
2964 val = (uint32_t)ldl_p(buf);
2965 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2969 /* 16 bit write access */
2971 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2975 /* 8 bit write access */
2977 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2985 ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
2986 memcpy(ptr, buf, l);
2987 invalidate_and_set_dirty(mr, addr1, l);
2991 qemu_mutex_unlock_iothread();
2992 release_lock = false;
3004 mr = flatview_translate(fv, addr, &addr1, &l, true);
3010 static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
3011 const uint8_t *buf, int len)
3016 MemTxResult result = MEMTX_OK;
3021 mr = flatview_translate(fv, addr, &addr1, &l, true);
3022 result = flatview_write_continue(fv, addr, attrs, buf, len,
3030 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
3032 const uint8_t *buf, int len)
3034 return flatview_write(address_space_to_flatview(as), addr, attrs, buf, len);
3037 /* Called within RCU critical section. */
3038 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
3039 MemTxAttrs attrs, uint8_t *buf,
3040 int len, hwaddr addr1, hwaddr l,
3045 MemTxResult result = MEMTX_OK;
3046 bool release_lock = false;
3049 if (!memory_access_is_direct(mr, false)) {
3051 release_lock |= prepare_mmio_access(mr);
3052 l = memory_access_size(mr, l, addr1);
3055 /* 64 bit read access */
3056 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
3061 /* 32 bit read access */
3062 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
3067 /* 16 bit read access */
3068 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
3073 /* 8 bit read access */
3074 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
3083 ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
3084 memcpy(buf, ptr, l);
3088 qemu_mutex_unlock_iothread();
3089 release_lock = false;
3101 mr = flatview_translate(fv, addr, &addr1, &l, false);
3107 MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
3108 MemTxAttrs attrs, uint8_t *buf, int len)
3113 MemTxResult result = MEMTX_OK;
3118 mr = flatview_translate(fv, addr, &addr1, &l, false);
3119 result = flatview_read_continue(fv, addr, attrs, buf, len,
3127 static MemTxResult flatview_rw(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
3128 uint8_t *buf, int len, bool is_write)
3131 return flatview_write(fv, addr, attrs, (uint8_t *)buf, len);
3133 return flatview_read(fv, addr, attrs, (uint8_t *)buf, len);
3137 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
3138 MemTxAttrs attrs, uint8_t *buf,
3139 int len, bool is_write)
3141 return flatview_rw(address_space_to_flatview(as),
3142 addr, attrs, buf, len, is_write);
3145 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
3146 int len, int is_write)
3148 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
3149 buf, len, is_write);
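/* Illustrative sketch, not part of the original file: the slow-path helper
 * above in use.  Reads four bytes of guest-physical memory into a host
 * buffer, then decodes them target-endian with ldl_p().
 */
static uint32_t __attribute__((unused)) example_read_guest_u32(hwaddr addr)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(addr, buf, sizeof(buf), 0 /* read */);
    return ldl_p(buf);
}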
3152 enum write_rom_type {
3157 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
3158 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
3168 mr = address_space_translate(as, addr, &addr1, &l, true);
3170 if (!(memory_region_is_ram(mr) ||
3171 memory_region_is_romd(mr))) {
3172 l = memory_access_size(mr, l, addr1);
3175 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3178 memcpy(ptr, buf, l);
3179 invalidate_and_set_dirty(mr, addr1, l);
3182 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
3193 /* used for ROM loading : can write in RAM and ROM */
3194 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
3195 const uint8_t *buf, int len)
3197 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
3200 void cpu_flush_icache_range(hwaddr start, int len)
3203 * This function should do the same thing as an icache flush that was
3204 * triggered from within the guest. For TCG we are always cache coherent,
3205 * so there is no need to flush anything. For KVM / Xen we need to flush
3206 * the host's instruction cache at least.
3208 if (tcg_enabled()) {
3212 cpu_physical_memory_write_rom_internal(&address_space_memory,
3213 start, NULL, len, FLUSH_CACHE);
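/* Illustrative sketch, not part of the original file: a code loader pairing
 * the ROM write with the icache flush above, so KVM/Xen guests execute the
 * new instructions.  All parameters are the caller's own.
 */
static void __attribute__((unused)) example_load_code(AddressSpace *as,
                                                      hwaddr pc,
                                                      const uint8_t *insns,
                                                      int len)
{
    cpu_physical_memory_write_rom(as, pc, insns, len);
    cpu_flush_icache_range(pc, len);  /* no-op under TCG, real flush otherwise */
}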
3224 static BounceBuffer bounce;
3226 typedef struct MapClient {
3228 QLIST_ENTRY(MapClient) link;
3231 QemuMutex map_client_list_lock;
3232 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3233 = QLIST_HEAD_INITIALIZER(map_client_list);
3235 static void cpu_unregister_map_client_do(MapClient *client)
3237 QLIST_REMOVE(client, link);
3241 static void cpu_notify_map_clients_locked(void)
3245 while (!QLIST_EMPTY(&map_client_list)) {
3246 client = QLIST_FIRST(&map_client_list);
3247 qemu_bh_schedule(client->bh);
3248 cpu_unregister_map_client_do(client);
3252 void cpu_register_map_client(QEMUBH *bh)
3254 MapClient *client = g_malloc(sizeof(*client));
3256 qemu_mutex_lock(&map_client_list_lock);
3258 QLIST_INSERT_HEAD(&map_client_list, client, link);
3259 if (!atomic_read(&bounce.in_use)) {
3260 cpu_notify_map_clients_locked();
3262 qemu_mutex_unlock(&map_client_list_lock);
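/* Illustrative sketch, not part of the original file: how a caller waits for
 * the single bounce buffer.  example_retry_bh() is a hypothetical callback
 * that would re-issue the failed address_space_map().
 */
static void example_retry_bh(void *opaque)
{
    /* retry the address_space_map() that previously returned NULL */
}

static void __attribute__((unused)) example_wait_for_bounce(void)
{
    QEMUBH *bh = qemu_bh_new(example_retry_bh, NULL);

    cpu_register_map_client(bh);  /* bh fires once bounce.in_use clears */
}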
3265 void cpu_exec_init_all(void)
3267 qemu_mutex_init(&ram_list.mutex);
3268 /* The data structures we set up here depend on knowing the page size,
3269 * so no more changes can be made after this point.
3270 * In an ideal world, nothing we did before we had finished the
3271 * machine setup would care about the target page size, and we could
3272 * do this much later, rather than requiring board models to state
3273 * up front what their requirements are.
3275 finalize_target_page_bits();
3278 qemu_mutex_init(&map_client_list_lock);
3281 void cpu_unregister_map_client(QEMUBH *bh)
3285 qemu_mutex_lock(&map_client_list_lock);
3286 QLIST_FOREACH(client, &map_client_list, link) {
3287 if (client->bh == bh) {
3288 cpu_unregister_map_client_do(client);
3292 qemu_mutex_unlock(&map_client_list_lock);
3295 static void cpu_notify_map_clients(void)
3297 qemu_mutex_lock(&map_client_list_lock);
3298 cpu_notify_map_clients_locked();
3299 qemu_mutex_unlock(&map_client_list_lock);
3302 static bool flatview_access_valid(FlatView *fv, hwaddr addr, int len,
3311 mr = flatview_translate(fv, addr, &xlat, &l, is_write);
3312 if (!memory_access_is_direct(mr, is_write)) {
3313 l = memory_access_size(mr, l, addr);
3314 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
3327 bool address_space_access_valid(AddressSpace *as, hwaddr addr,
3328 int len, bool is_write)
3330 return flatview_access_valid(address_space_to_flatview(as),
3331 addr, len, is_write);
3335 flatview_extend_translation(FlatView *fv, hwaddr addr,
3337 MemoryRegion *mr, hwaddr base, hwaddr len,
3342 MemoryRegion *this_mr;
3348 if (target_len == 0) {
3353 this_mr = flatview_translate(fv, addr, &xlat,
3355 if (this_mr != mr || xlat != base + done) {
3361 /* Map a physical memory region into a host virtual address.
3362 * May map a subset of the requested range, given by and returned in *plen.
3363 * May return NULL if resources needed to perform the mapping are exhausted.
3364 * Use only for reads OR writes - not for read-modify-write operations.
3365 * Use cpu_register_map_client() to know when retrying the map operation is
3366 * likely to succeed.
3368 void *address_space_map(AddressSpace *as,
3377 FlatView *fv = address_space_to_flatview(as);
3385 mr = flatview_translate(fv, addr, &xlat, &l, is_write);
3387 if (!memory_access_is_direct(mr, is_write)) {
3388 if (atomic_xchg(&bounce.in_use, true)) {
3392 /* Avoid unbounded allocations */
3393 l = MIN(l, TARGET_PAGE_SIZE);
3394 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
3398 memory_region_ref(mr);
3401 flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED,
3407 return bounce.buffer;
3411 memory_region_ref(mr);
3412 *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
3414 ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
3420 /* Unmaps a memory region previously mapped by address_space_map().
3421 * Will also mark the memory as dirty if is_write == 1. access_len gives
3422 * the amount of memory that was actually read or written by the caller.
3424 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3425 int is_write, hwaddr access_len)
3427 if (buffer != bounce.buffer) {
3431 mr = memory_region_from_host(buffer, &addr1);
3434 invalidate_and_set_dirty(mr, addr1, access_len);
3436 if (xen_enabled()) {
3437 xen_invalidate_map_cache_entry(buffer);
3439 memory_region_unref(mr);
3443 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3444 bounce.buffer, access_len);
3446 qemu_vfree(bounce.buffer);
3447 bounce.buffer = NULL;
3448 memory_region_unref(bounce.mr);
3449 atomic_mb_set(&bounce.in_use, false);
3450 cpu_notify_map_clients();
3453 void *cpu_physical_memory_map(hwaddr addr,
3457 return address_space_map(&address_space_memory, addr, plen, is_write);
3460 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3461 int is_write, hwaddr access_len)
3463 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
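/* Illustrative sketch, not part of the original file: the canonical
 * map -> access -> unmap cycle for zero-copy DMA.  The mapping may come up
 * shorter than requested, so the copy uses the returned length.
 */
static void __attribute__((unused)) example_dma_to_guest(AddressSpace *as,
                                                         hwaddr addr,
                                                         const void *src,
                                                         hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true /* is_write */);

    if (host) {
        memcpy(host, src, plen);  /* plen may be < len */
        address_space_unmap(as, host, plen, true, plen);
    }
}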
3466 #define ARG1_DECL AddressSpace *as
3469 #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__)
3470 #define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write)
3471 #define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
3472 #define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
3473 #define RCU_READ_LOCK(...) rcu_read_lock()
3474 #define RCU_READ_UNLOCK(...) rcu_read_unlock()
3475 #include "memory_ldst.inc.c"
3477 int64_t address_space_cache_init(MemoryRegionCache *cache,
3489 void address_space_cache_invalidate(MemoryRegionCache *cache,
3495 void address_space_cache_destroy(MemoryRegionCache *cache)
3500 #define ARG1_DECL MemoryRegionCache *cache
3502 #define SUFFIX _cached
3503 #define TRANSLATE(addr, ...) \
3504 address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
3505 #define IS_DIRECT(mr, is_write) true
3506 #define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs)
3507 #define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
3508 #define RCU_READ_LOCK() rcu_read_lock()
3509 #define RCU_READ_UNLOCK() rcu_read_unlock()
3510 #include "memory_ldst.inc.c"
3512 /* virtual memory access for debug (includes writing to ROM) */
3513 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3514 uint8_t *buf, int len, int is_write)
3520 cpu_synchronize_state(cpu);
3525 page = addr & TARGET_PAGE_MASK;
3526 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3527 asidx = cpu_asidx_from_attrs(cpu, attrs);
3528 /* if no physical page mapped, return an error */
3529 if (phys_addr == -1)
3531 l = (page + TARGET_PAGE_SIZE) - addr;
3534 phys_addr += (addr & ~TARGET_PAGE_MASK);
3536 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3539 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3540 MEMTXATTRS_UNSPECIFIED,
3551 * Allows code that needs to deal with migration bitmaps etc to still be built
3552 * target independent.
3554 size_t qemu_target_page_size(void)
3556 return TARGET_PAGE_SIZE;
3559 int qemu_target_page_bits(void)
3561 return TARGET_PAGE_BITS;
3564 int qemu_target_page_bits_min(void)
3566 return TARGET_PAGE_BITS_MIN;
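/* Illustrative sketch, not part of the original file: target-independent
 * code (e.g. migration) uses the accessors above instead of touching
 * TARGET_PAGE_SIZE directly.
 */
static size_t __attribute__((unused)) example_pages_needed(size_t bytes)
{
    size_t psize = qemu_target_page_size();

    return (bytes + psize - 1) / psize;  /* round up to whole target pages */
}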
3571 * A helper function for the _utterly broken_ virtio device model to find out if
3572 * it's running on a big endian machine. Don't do this at home kids!
3574 bool target_words_bigendian(void);
3575 bool target_words_bigendian(void)
3577 #if defined(TARGET_WORDS_BIGENDIAN)
3584 #ifndef CONFIG_USER_ONLY
3585 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3592 mr = address_space_translate(&address_space_memory,
3593 phys_addr, &phys_addr, &l, false);
3595 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3600 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3606 RAMBLOCK_FOREACH(block) {
3607 ret = func(block->idstr, block->host, block->offset,
3608 block->used_length, opaque);
3618 * Unmap pages of memory from start to start+length such that
3619 * they a) read as 0, b) trigger whatever fault mechanism
3620 * the OS provides for postcopy.
3621 * The pages must be unmapped by the end of the function.
3622 * Returns: 0 on success, non-0 on failure
3625 int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
3629 uint8_t *host_startaddr = rb->host + start;
3631 if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
3632 error_report("ram_block_discard_range: Unaligned start address: %p",
3637 if ((start + length) <= rb->used_length) {
3638 uint8_t *host_endaddr = host_startaddr + length;
3639 if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
3640 error_report("ram_block_discard_range: Unaligned end address: %p",
3645 errno = ENOTSUP; /* If we are missing MADVISE etc */
3647 if (rb->page_size == qemu_host_page_size) {
3648 #if defined(CONFIG_MADVISE)
3649 /* Note: We need the madvise MADV_DONTNEED behaviour of definitely freeing the page. */
3652 ret = madvise(host_startaddr, length, MADV_DONTNEED);
3655 /* Huge page case - unfortunately it can't do DONTNEED, but
3656 * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the huge page file. */
3659 #ifdef CONFIG_FALLOCATE_PUNCH_HOLE
3660 ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
3666 error_report("ram_block_discard_range: Failed to discard range "
3667 "%s:%" PRIx64 " +%zx (%d)",
3668 rb->idstr, start, length, ret);
3671 error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
3672 "/%zx/" RAM_ADDR_FMT")",
3673 rb->idstr, start, length, rb->used_length);
3682 void page_size_init(void)
3684 /* NOTE: we can always suppose that qemu_host_page_size >= TARGET_PAGE_SIZE */
3686 if (qemu_host_page_size == 0) {
3687 qemu_host_page_size = qemu_real_host_page_size;
3689 if (qemu_host_page_size < TARGET_PAGE_SIZE) {
3690 qemu_host_page_size = TARGET_PAGE_SIZE;
3692 qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
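/* Illustrative sketch, not part of the original file: because the mask
 * computed above is the negated page size, a single AND rounds an address
 * down to its host-page boundary.  The example address is made up.
 */
static uintptr_t __attribute__((unused)) example_host_page_start(uintptr_t addr)
{
    return addr & qemu_host_page_mask;  /* 0x12345 -> 0x12000 with 4K pages */
}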
3695 #if !defined(CONFIG_USER_ONLY)
3697 static void mtree_print_phys_entries(fprintf_function mon, void *f,
3698 int start, int end, int skip, int ptr)
3700 if (start == end - 1) {
3701 mon(f, "\t%3d ", start);
3703 mon(f, "\t%3d..%-3d ", start, end - 1);
3705 mon(f, " skip=%d ", skip);
3706 if (ptr == PHYS_MAP_NODE_NIL) {
3709 mon(f, " ptr=#%d", ptr);
3711 mon(f, " ptr=[%d]", ptr);
3716 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3717 int128_sub((size), int128_one())) : 0)
3719 void mtree_print_dispatch(fprintf_function mon, void *f,
3720 AddressSpaceDispatch *d, MemoryRegion *root)
3724 mon(f, " Dispatch\n");
3725 mon(f, " Physical sections\n");
3727 for (i = 0; i < d->map.sections_nb; ++i) {
3728 MemoryRegionSection *s = d->map.sections + i;
3729 const char *names[] = { " [unassigned]", " [not dirty]",
3730 " [ROM]", " [watch]" };
3732 mon(f, " #%d @" TARGET_FMT_plx ".." TARGET_FMT_plx " %s%s%s%s%s",
3734 s->offset_within_address_space,
3735 s->offset_within_address_space + MR_SIZE(s->mr->size),
3736 s->mr->name ? s->mr->name : "(noname)",
3737 i < ARRAY_SIZE(names) ? names[i] : "",
3738 s->mr == root ? " [ROOT]" : "",
3739 s == d->mru_section ? " [MRU]" : "",
3740 s->mr->is_iommu ? " [iommu]" : "");
3743 mon(f, " alias=%s", s->mr->alias->name ?
3744 s->mr->alias->name : "noname");
3749 mon(f, " Nodes (%d bits per level, %d levels) ptr=[%d] skip=%d\n",
3750 P_L2_BITS, P_L2_LEVELS, d->phys_map.ptr, d->phys_map.skip);
3751 for (i = 0; i < d->map.nodes_nb; ++i) {
3754 Node *n = d->map.nodes + i;
3756 mon(f, " [%d]\n", i);
3758 for (j = 0, jprev = 0, prev = *n[0]; j < ARRAY_SIZE(*n); ++j) {
3759 PhysPageEntry *pe = *n + j;
3761 if (pe->ptr == prev.ptr && pe->skip == prev.skip) {
3765 mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);
3771 if (jprev != ARRAY_SIZE(*n)) {
3772 mtree_print_phys_entries(mon, f, jprev, j, prev.skip, prev.ptr);