2 * Physical memory management
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
7 * Avi Kivity <avi@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
16 #include "qemu/osdep.h"
18 #include "qapi/error.h"
19 #include "exec/memory.h"
20 #include "qapi/visitor.h"
21 #include "qemu/bitops.h"
22 #include "qemu/error-report.h"
23 #include "qemu/main-loop.h"
24 #include "qemu/qemu-print.h"
25 #include "qom/object.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
30 #include "sysemu/kvm.h"
31 #include "sysemu/runstate.h"
32 #include "sysemu/tcg.h"
33 #include "qemu/accel.h"
34 #include "hw/boards.h"
35 #include "migration/vmstate.h"
36 #include "exec/address-spaces.h"
38 //#define DEBUG_UNASSIGNED
40 static unsigned memory_region_transaction_depth;
41 static bool memory_region_update_pending;
42 static bool ioeventfd_update_pending;
43 unsigned int global_dirty_tracking;
45 static QTAILQ_HEAD(, MemoryListener) memory_listeners
46 = QTAILQ_HEAD_INITIALIZER(memory_listeners);
48 static QTAILQ_HEAD(, AddressSpace) address_spaces
49 = QTAILQ_HEAD_INITIALIZER(address_spaces);
51 static GHashTable *flat_views;
53 typedef struct AddrRange AddrRange;
56 * Note that signed integers are needed for negative offsetting in aliases
57 * (large MemoryRegion::alias_offset).
64 static AddrRange addrrange_make(Int128 start, Int128 size)
66 return (AddrRange) { start, size };
69 static bool addrrange_equal(AddrRange r1, AddrRange r2)
71 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
74 static Int128 addrrange_end(AddrRange r)
76 return int128_add(r.start, r.size);
79 static AddrRange addrrange_shift(AddrRange range, Int128 delta)
81 int128_addto(&range.start, delta);
85 static bool addrrange_contains(AddrRange range, Int128 addr)
87 return int128_ge(addr, range.start)
88 && int128_lt(addr, addrrange_end(range));
91 static bool addrrange_intersects(AddrRange r1, AddrRange r2)
93 return addrrange_contains(r1, r2.start)
94 || addrrange_contains(r2, r1.start);
97 static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
99 Int128 start = int128_max(r1.start, r2.start);
100 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
101 return addrrange_make(start, int128_sub(end, start));
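/*
 * Illustrative sketch (not part of the original file): how the AddrRange
 * helpers above compose.  The function name and the concrete ranges are
 * assumptions picked for the example only.
 */
static G_GNUC_UNUSED void addrrange_intersection_example(void)
{
    AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
    AddrRange b = addrrange_make(int128_make64(0x1800), int128_make64(0x4000));

    if (addrrange_intersects(a, b)) {
        /* [0x1000, 0x3000) and [0x1800, 0x5800) overlap in [0x1800, 0x3000) */
        AddrRange i = addrrange_intersection(a, b);

        assert(int128_get64(i.start) == 0x1800);
        assert(int128_get64(i.size) == 0x1800);
    }
}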
104 enum ListenerDirection { Forward, Reverse };
106 #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
108 MemoryListener *_listener; \
110 switch (_direction) { \
112 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
113 if (_listener->_callback) { \
114 _listener->_callback(_listener, ##_args); \
119 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
120 if (_listener->_callback) { \
121 _listener->_callback(_listener, ##_args); \
130 #define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
132 MemoryListener *_listener; \
134 switch (_direction) { \
136 QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
137 if (_listener->_callback) { \
138 _listener->_callback(_listener, _section, ##_args); \
143 QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
144 if (_listener->_callback) { \
145 _listener->_callback(_listener, _section, ##_args); \
154 /* No need to ref/unref .mr, the FlatRange keeps it alive. */
155 #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
157 MemoryRegionSection mrs = section_from_flat_range(fr, \
158 address_space_to_flatview(as)); \
159 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
162 struct CoalescedMemoryRange {
164 QTAILQ_ENTRY(CoalescedMemoryRange) link;
167 struct MemoryRegionIoeventfd {
174 static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
175 MemoryRegionIoeventfd *b)
177 if (int128_lt(a->addr.start, b->addr.start)) {
179 } else if (int128_gt(a->addr.start, b->addr.start)) {
181 } else if (int128_lt(a->addr.size, b->addr.size)) {
183 } else if (int128_gt(a->addr.size, b->addr.size)) {
185 } else if (a->match_data < b->match_data) {
187 } else if (a->match_data > b->match_data) {
189 } else if (a->match_data) {
190 if (a->data < b->data) {
192 } else if (a->data > b->data) {
198 } else if (a->e > b->e) {
204 static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
205 MemoryRegionIoeventfd *b)
207 if (int128_eq(a->addr.start, b->addr.start) &&
208 (!int128_nz(a->addr.size) || !int128_nz(b->addr.size) ||
209 (int128_eq(a->addr.size, b->addr.size) &&
210 (a->match_data == b->match_data) &&
211 ((a->match_data && (a->data == b->data)) || !a->match_data) &&
218 /* Range of memory in the global map. Addresses are absolute. */
221 hwaddr offset_in_region;
223 uint8_t dirty_log_mask;
230 #define FOR_EACH_FLAT_RANGE(var, view) \
231 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
233 static inline MemoryRegionSection
234 section_from_flat_range(FlatRange *fr, FlatView *fv)
236 return (MemoryRegionSection) {
239 .offset_within_region = fr->offset_in_region,
240 .size = fr->addr.size,
241 .offset_within_address_space = int128_get64(fr->addr.start),
242 .readonly = fr->readonly,
243 .nonvolatile = fr->nonvolatile,
244 .unmergeable = fr->unmergeable,
248 static bool flatrange_equal(FlatRange *a, FlatRange *b)
250 return a->mr == b->mr
251 && addrrange_equal(a->addr, b->addr)
252 && a->offset_in_region == b->offset_in_region
253 && a->romd_mode == b->romd_mode
254 && a->readonly == b->readonly
255 && a->nonvolatile == b->nonvolatile
256 && a->unmergeable == b->unmergeable;
259 static FlatView *flatview_new(MemoryRegion *mr_root)
263 view = g_new0(FlatView, 1);
265 view->root = mr_root;
266 memory_region_ref(mr_root);
267 trace_flatview_new(view, mr_root);
272 /* Insert a range into a given position. Caller is responsible for maintaining sorting order. */
275 static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
277 if (view->nr == view->nr_allocated) {
278 view->nr_allocated = MAX(2 * view->nr, 10);
279 view->ranges = g_realloc(view->ranges,
280 view->nr_allocated * sizeof(*view->ranges));
282 memmove(view->ranges + pos + 1, view->ranges + pos,
283 (view->nr - pos) * sizeof(FlatRange));
284 view->ranges[pos] = *range;
285 memory_region_ref(range->mr);
289 static void flatview_destroy(FlatView *view)
293 trace_flatview_destroy(view, view->root);
294 if (view->dispatch) {
295 address_space_dispatch_free(view->dispatch);
297 for (i = 0; i < view->nr; i++) {
298 memory_region_unref(view->ranges[i].mr);
300 g_free(view->ranges);
301 memory_region_unref(view->root);
305 static bool flatview_ref(FlatView *view)
307 return qatomic_fetch_inc_nonzero(&view->ref) > 0;
310 void flatview_unref(FlatView *view)
312 if (qatomic_fetch_dec(&view->ref) == 1) {
313 trace_flatview_destroy_rcu(view, view->root);
315 call_rcu(view, flatview_destroy, rcu);
319 static bool can_merge(FlatRange *r1, FlatRange *r2)
321 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
323 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
325 int128_make64(r2->offset_in_region))
326 && r1->dirty_log_mask == r2->dirty_log_mask
327 && r1->romd_mode == r2->romd_mode
328 && r1->readonly == r2->readonly
329 && r1->nonvolatile == r2->nonvolatile
330 && !r1->unmergeable && !r2->unmergeable;
333 /* Attempt to simplify a view by merging adjacent ranges */
334 static void flatview_simplify(FlatView *view)
339 while (i < view->nr) {
342 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
343 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
347 for (k = i; k < j; k++) {
348 memory_region_unref(view->ranges[k].mr);
350 memmove(&view->ranges[i], &view->ranges[j],
351 (view->nr - j) * sizeof(view->ranges[j]));
356 static bool memory_region_big_endian(MemoryRegion *mr)
358 #if TARGET_BIG_ENDIAN
359 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
361 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
365 static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
367 if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
368 switch (op & MO_SIZE) {
372 *data = bswap16(*data);
375 *data = bswap32(*data);
378 *data = bswap64(*data);
381 g_assert_not_reached();
386 static inline void memory_region_shift_read_access(uint64_t *value,
392 *value |= (tmp & mask) << shift;
394 *value |= (tmp & mask) >> -shift;
398 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
405 tmp = (*value >> shift) & mask;
407 tmp = (*value << -shift) & mask;
413 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
416 hwaddr abs_addr = offset;
418 abs_addr += mr->addr;
419 for (root = mr; root->container; ) {
420 root = root->container;
421 abs_addr += root->addr;
427 static int get_cpu_index(void)
430 return current_cpu->cpu_index;
435 static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
445 tmp = mr->ops->read(mr->opaque, addr, size);
447 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
448 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
449 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
450 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
451 memory_region_name(mr));
453 memory_region_shift_read_access(value, shift, mask, tmp);
457 static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
468 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
470 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
471 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
472 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
473 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size,
474 memory_region_name(mr));
476 memory_region_shift_read_access(value, shift, mask, tmp);
480 static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
488 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
491 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
492 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
493 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
494 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
495 memory_region_name(mr));
497 mr->ops->write(mr->opaque, addr, tmp, size);
501 static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
509 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
512 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
513 } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
514 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
515 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size,
516 memory_region_name(mr));
518 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
521 static MemTxResult access_with_adjusted_size(hwaddr addr,
524 unsigned access_size_min,
525 unsigned access_size_max,
526 MemTxResult (*access_fn)
537 uint64_t access_mask;
538 unsigned access_size;
540 MemTxResult r = MEMTX_OK;
541 bool reentrancy_guard_applied = false;
543 if (!access_size_min) {
546 if (!access_size_max) {
550 /* Do not allow more than one simultaneous access to a device's IO Regions */
551 if (mr->dev && !mr->disable_reentrancy_guard &&
552 !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) {
553 if (mr->dev->mem_reentrancy_guard.engaged_in_io) {
554 warn_report_once("Blocked re-entrant IO on MemoryRegion: "
555 "%s at addr: 0x%" HWADDR_PRIX,
556 memory_region_name(mr), addr);
557 return MEMTX_ACCESS_ERROR;
559 mr->dev->mem_reentrancy_guard.engaged_in_io = true;
560 reentrancy_guard_applied = true;
563 /* FIXME: support unaligned access? */
564 access_size = MAX(MIN(size, access_size_max), access_size_min);
565 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
566 if (memory_region_big_endian(mr)) {
567 for (i = 0; i < size; i += access_size) {
568 r |= access_fn(mr, addr + i, value, access_size,
569 (size - access_size - i) * 8, access_mask, attrs);
572 for (i = 0; i < size; i += access_size) {
573 r |= access_fn(mr, addr + i, value, access_size, i * 8,
577 if (mr->dev && reentrancy_guard_applied) {
578 mr->dev->mem_reentrancy_guard.engaged_in_io = false;
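/*
 * Worked sketch (illustrative, not part of the original file): how the loop
 * above splits a wide access.  For an 8-byte guest access to a device whose
 * impl.max_access_size is 4, two 4-byte sub-accesses are issued at addr + 0
 * and addr + 4; the shift decides where each lands in the 64-bit value.
 */
static G_GNUC_UNUSED void access_split_example(void)
{
    const unsigned size = 8;           /* guest access size */
    const unsigned access_size = 4;    /* device implementation limit */
    const uint64_t access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    unsigned i;

    g_assert(access_mask == 0xffffffffULL);

    for (i = 0; i < size; i += access_size) {
        /*
         * Little-endian device: the sub-access at offset i lands at bit i * 8.
         * Big-endian device: it lands at bit (size - access_size - i) * 8,
         * i.e. the first sub-access carries the most significant bits.
         */
        unsigned le_shift = i * 8;
        unsigned be_shift = (size - access_size - i) * 8;

        g_assert(le_shift + be_shift == (size - access_size) * 8);
    }
}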
583 static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
587 while (mr->container) {
590 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
591 if (mr == as->root) {
598 /* Render a memory region into the global view. Ranges in @view obscure ranges in @mr. */
601 static void render_memory_region(FlatView *view,
609 MemoryRegion *subregion;
611 hwaddr offset_in_region;
621 int128_addto(&base, int128_make64(mr->addr));
622 readonly |= mr->readonly;
623 nonvolatile |= mr->nonvolatile;
624 unmergeable |= mr->unmergeable;
626 tmp = addrrange_make(base, mr->size);
628 if (!addrrange_intersects(tmp, clip)) {
632 clip = addrrange_intersection(tmp, clip);
635 int128_subfrom(&base, int128_make64(mr->alias->addr));
636 int128_subfrom(&base, int128_make64(mr->alias_offset));
637 render_memory_region(view, mr->alias, base, clip,
638 readonly, nonvolatile, unmergeable);
642 /* Render subregions in priority order. */
643 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
644 render_memory_region(view, subregion, base, clip,
645 readonly, nonvolatile, unmergeable);
648 if (!mr->terminates) {
652 offset_in_region = int128_get64(int128_sub(clip.start, base));
657 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
658 fr.romd_mode = mr->romd_mode;
659 fr.readonly = readonly;
660 fr.nonvolatile = nonvolatile;
661 fr.unmergeable = unmergeable;
663 /* Render the region itself into any gaps left by the current view. */
664 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
665 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
668 if (int128_lt(base, view->ranges[i].addr.start)) {
669 now = int128_min(remain,
670 int128_sub(view->ranges[i].addr.start, base));
671 fr.offset_in_region = offset_in_region;
672 fr.addr = addrrange_make(base, now);
673 flatview_insert(view, i, &fr);
675 int128_addto(&base, now);
676 offset_in_region += int128_get64(now);
677 int128_subfrom(&remain, now);
679 now = int128_sub(int128_min(int128_add(base, remain),
680 addrrange_end(view->ranges[i].addr)),
682 int128_addto(&base, now);
683 offset_in_region += int128_get64(now);
684 int128_subfrom(&remain, now);
686 if (int128_nz(remain)) {
687 fr.offset_in_region = offset_in_region;
688 fr.addr = addrrange_make(base, remain);
689 flatview_insert(view, i, &fr);
693 void flatview_for_each_range(FlatView *fv, flatview_cb cb , void *opaque)
700 FOR_EACH_FLAT_RANGE(fr, fv) {
701 if (cb(fr->addr.start, fr->addr.size, fr->mr,
702 fr->offset_in_region, opaque)) {
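/*
 * Usage sketch (illustrative): dumping the flat ranges of an address space
 * with the iterator above.  The callback and function names are assumptions;
 * returning true from the callback stops the walk early.
 */
static G_GNUC_UNUSED bool dump_range_cb(Int128 start, Int128 len,
                                        const MemoryRegion *mr,
                                        hwaddr offset_in_region, void *opaque)
{
    qemu_printf("  0x%016" PRIx64 " +0x%" PRIx64 " -> %s\n",
                int128_get64(start), int128_getlo(len),
                memory_region_name(mr));
    return false;   /* keep walking */
}

static G_GNUC_UNUSED void dump_address_space_example(AddressSpace *as)
{
    FlatView *view = address_space_get_flatview(as);

    flatview_for_each_range(view, dump_range_cb, NULL);
    flatview_unref(view);
}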
708 static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
710 while (mr->enabled) {
712 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
713 /* The alias is included in its entirety. Use it as
714 * the "real" root, so that we can share more FlatViews.
719 } else if (!mr->terminates) {
720 unsigned int found = 0;
721 MemoryRegion *child, *next = NULL;
722 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
723 if (child->enabled) {
728 if (!child->addr && int128_ge(mr->size, child->size)) {
729 /* A child is included in its entirety. If it's the only
730 * enabled one, use it in the hope of finding an alias down the
731 * way. This will also let us share FlatViews.
752 /* Render a memory topology into a list of disjoint absolute ranges. */
753 static FlatView *generate_memory_topology(MemoryRegion *mr)
758 view = flatview_new(mr);
761 render_memory_region(view, mr, int128_zero(),
762 addrrange_make(int128_zero(), int128_2_64()),
763 false, false, false);
765 flatview_simplify(view);
767 view->dispatch = address_space_dispatch_new(view);
768 for (i = 0; i < view->nr; i++) {
769 MemoryRegionSection mrs =
770 section_from_flat_range(&view->ranges[i], view);
771 flatview_add_to_dispatch(view, &mrs);
773 address_space_dispatch_compact(view->dispatch);
774 g_hash_table_replace(flat_views, mr, view);
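/*
 * Illustrative sketch (assumption: called from board code with the BQL held):
 * two overlapping subregions of a container and the flat ranges the renderer
 * above produces for them.  All names and addresses are hypothetical.
 */
static G_GNUC_UNUSED void rendering_example(Object *owner,
                                            MemoryRegion *container)
{
    MemoryRegion *low = g_new0(MemoryRegion, 1);
    MemoryRegion *high = g_new0(MemoryRegion, 1);

    memory_region_init_ram_nomigrate(low, owner, "low", 0x4000, &error_fatal);
    memory_region_init_ram_nomigrate(high, owner, "high", 0x1000, &error_fatal);

    memory_region_add_subregion(container, 0x0000, low);
    /* Overlaps [0x1000, 0x2000) of "low"; priority 1 wins over priority 0. */
    memory_region_add_subregion_overlap(container, 0x1000, high, 1);

    /*
     * Within the container, the resulting FlatView holds three disjoint
     * ranges: [0x0000, 0x1000) -> low, [0x1000, 0x2000) -> high,
     * [0x2000, 0x4000) -> low.
     */
}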
779 static void address_space_add_del_ioeventfds(AddressSpace *as,
780 MemoryRegionIoeventfd *fds_new,
782 MemoryRegionIoeventfd *fds_old,
786 MemoryRegionIoeventfd *fd;
787 MemoryRegionSection section;
789 /* Generate a symmetric difference of the old and new fd sets, adding
790 * and deleting as necessary.
794 while (iold < fds_old_nb || inew < fds_new_nb) {
795 if (iold < fds_old_nb
796 && (inew == fds_new_nb
797 || memory_region_ioeventfd_before(&fds_old[iold],
800 section = (MemoryRegionSection) {
801 .fv = address_space_to_flatview(as),
802 .offset_within_address_space = int128_get64(fd->addr.start),
803 .size = fd->addr.size,
805 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
806 fd->match_data, fd->data, fd->e);
808 } else if (inew < fds_new_nb
809 && (iold == fds_old_nb
810 || memory_region_ioeventfd_before(&fds_new[inew],
813 section = (MemoryRegionSection) {
814 .fv = address_space_to_flatview(as),
815 .offset_within_address_space = int128_get64(fd->addr.start),
816 .size = fd->addr.size,
818 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
819 fd->match_data, fd->data, fd->e);
828 FlatView *address_space_get_flatview(AddressSpace *as)
832 RCU_READ_LOCK_GUARD();
834 view = address_space_to_flatview(as);
835 /* If somebody has replaced as->current_map concurrently,
836 * flatview_ref returns false.
838 } while (!flatview_ref(view));
842 static void address_space_update_ioeventfds(AddressSpace *as)
846 unsigned ioeventfd_nb = 0;
847 unsigned ioeventfd_max;
848 MemoryRegionIoeventfd *ioeventfds;
852 if (!as->ioeventfd_notifiers) {
857 * It is likely that the number of ioeventfds hasn't changed much, so use
858 * the previous size as the starting value, with some headroom to avoid
859 * gratuitous reallocations.
861 ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
862 ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);
864 view = address_space_get_flatview(as);
865 FOR_EACH_FLAT_RANGE(fr, view) {
866 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
867 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
868 int128_sub(fr->addr.start,
869 int128_make64(fr->offset_in_region)));
870 if (addrrange_intersects(fr->addr, tmp)) {
872 if (ioeventfd_nb > ioeventfd_max) {
873 ioeventfd_max = MAX(ioeventfd_max * 2, 4);
874 ioeventfds = g_realloc(ioeventfds,
875 ioeventfd_max * sizeof(*ioeventfds));
877 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
878 ioeventfds[ioeventfd_nb-1].addr = tmp;
883 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
884 as->ioeventfds, as->ioeventfd_nb);
886 g_free(as->ioeventfds);
887 as->ioeventfds = ioeventfds;
888 as->ioeventfd_nb = ioeventfd_nb;
889 flatview_unref(view);
893 * Notify the memory listeners about the coalesced IO change events of
894 * range `cmr'. Only the part that intersects the specified
895 * FlatRange will be sent.
897 static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
898 CoalescedMemoryRange *cmr, bool add)
902 tmp = addrrange_shift(cmr->addr,
903 int128_sub(fr->addr.start,
904 int128_make64(fr->offset_in_region)));
905 if (!addrrange_intersects(tmp, fr->addr)) {
908 tmp = addrrange_intersection(tmp, fr->addr);
911 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
912 int128_get64(tmp.start),
913 int128_get64(tmp.size));
915 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
916 int128_get64(tmp.start),
917 int128_get64(tmp.size));
921 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
923 CoalescedMemoryRange *cmr;
925 QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
926 flat_range_coalesced_io_notify(fr, as, cmr, false);
930 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
932 MemoryRegion *mr = fr->mr;
933 CoalescedMemoryRange *cmr;
935 if (QTAILQ_EMPTY(&mr->coalesced)) {
939 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
940 flat_range_coalesced_io_notify(fr, as, cmr, true);
944 static void address_space_update_topology_pass(AddressSpace *as,
945 const FlatView *old_view,
946 const FlatView *new_view,
950 FlatRange *frold, *frnew;
952 /* Generate a symmetric difference of the old and new memory maps.
953 * Kill ranges in the old map, and instantiate ranges in the new map.
956 while (iold < old_view->nr || inew < new_view->nr) {
957 if (iold < old_view->nr) {
958 frold = &old_view->ranges[iold];
962 if (inew < new_view->nr) {
963 frnew = &new_view->ranges[inew];
970 || int128_lt(frold->addr.start, frnew->addr.start)
971 || (int128_eq(frold->addr.start, frnew->addr.start)
972 && !flatrange_equal(frold, frnew)))) {
973 /* In old but not in new, or in both but attributes changed. */
976 flat_range_coalesced_io_del(frold, as);
977 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
981 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
982 /* In both and unchanged (except logging may have changed) */
985 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
986 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
987 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
988 frold->dirty_log_mask,
989 frnew->dirty_log_mask);
991 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
992 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
993 frold->dirty_log_mask,
994 frnew->dirty_log_mask);
1004 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
1005 flat_range_coalesced_io_add(frnew, as);
1013 static void flatviews_init(void)
1015 static FlatView *empty_view;
1021 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
1022 (GDestroyNotify) flatview_unref);
1024 empty_view = generate_memory_topology(NULL);
1025 /* We keep it alive forever in the global variable. */
1026 flatview_ref(empty_view);
1028 g_hash_table_replace(flat_views, NULL, empty_view);
1029 flatview_ref(empty_view);
1033 static void flatviews_reset(void)
1038 g_hash_table_unref(flat_views);
1043 /* Render unique FVs */
1044 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1045 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1047 if (g_hash_table_lookup(flat_views, physmr)) {
1051 generate_memory_topology(physmr);
1055 static void address_space_set_flatview(AddressSpace *as)
1057 FlatView *old_view = address_space_to_flatview(as);
1058 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1059 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1063 if (old_view == new_view) {
1068 flatview_ref(old_view);
1071 flatview_ref(new_view);
1073 if (!QTAILQ_EMPTY(&as->listeners)) {
1074 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1077 old_view2 = &tmpview;
1079 address_space_update_topology_pass(as, old_view2, new_view, false);
1080 address_space_update_topology_pass(as, old_view2, new_view, true);
1083 /* Writes are protected by the BQL. */
1084 qatomic_rcu_set(&as->current_map, new_view);
1086 flatview_unref(old_view);
1089 /* Note that all the old MemoryRegions are still alive up to this
1090 * point. This relieves most MemoryListeners from the need to
1091 * ref/unref the MemoryRegions they get---unless they use them
1092 * outside the iothread mutex, in which case precise reference
1093 * counting is necessary.
1096 flatview_unref(old_view);
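/*
 * Reader-side sketch (illustrative): the RCU pattern that makes the
 * qatomic_rcu_set() above safe.  Short-lived readers take the RCU read lock
 * and only use the view inside the critical section; long-lived users call
 * address_space_get_flatview()/flatview_unref() instead.
 */
static G_GNUC_UNUSED void flatview_reader_example(AddressSpace *as)
{
    RCU_READ_LOCK_GUARD();
    FlatView *view = address_space_to_flatview(as);

    /* ... look up ranges in "view" only while the guard is held ... */
    (void)view;
}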
1100 static void address_space_update_topology(AddressSpace *as)
1102 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1105 if (!g_hash_table_lookup(flat_views, physmr)) {
1106 generate_memory_topology(physmr);
1108 address_space_set_flatview(as);
1111 void memory_region_transaction_begin(void)
1113 qemu_flush_coalesced_mmio_buffer();
1114 ++memory_region_transaction_depth;
1117 void memory_region_transaction_commit(void)
1121 assert(memory_region_transaction_depth);
1122 assert(qemu_mutex_iothread_locked());
1124 --memory_region_transaction_depth;
1125 if (!memory_region_transaction_depth) {
1126 if (memory_region_update_pending) {
1129 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1131 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1132 address_space_set_flatview(as);
1133 address_space_update_ioeventfds(as);
1135 memory_region_update_pending = false;
1136 ioeventfd_update_pending = false;
1137 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1138 } else if (ioeventfd_update_pending) {
1139 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1140 address_space_update_ioeventfds(as);
1142 ioeventfd_update_pending = false;
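/*
 * Usage sketch (illustrative): batching several layout changes so that
 * FlatViews and ioeventfds are rebuilt only once, at the outermost commit.
 * "bar" and the new address are hypothetical.
 */
static G_GNUC_UNUSED void remap_bar_example(MemoryRegion *bar, hwaddr new_addr)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(bar, false);
    memory_region_set_address(bar, new_addr);
    memory_region_set_enabled(bar, true);
    memory_region_transaction_commit();
}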
1147 static void memory_region_destructor_none(MemoryRegion *mr)
1151 static void memory_region_destructor_ram(MemoryRegion *mr)
1153 qemu_ram_free(mr->ram_block);
1156 static bool memory_region_need_escape(char c)
1158 return c == '/' || c == '[' || c == '\\' || c == ']';
1161 static char *memory_region_escape_name(const char *name)
1168 for (p = name; *p; p++) {
1169 bytes += memory_region_need_escape(*p) ? 4 : 1;
1171 if (bytes == p - name) {
1172 return g_memdup(name, bytes + 1);
1175 escaped = g_malloc(bytes + 1);
1176 for (p = name, q = escaped; *p; p++) {
1178 if (unlikely(memory_region_need_escape(c))) {
1181 *q++ = "0123456789abcdef"[c >> 4];
1182 c = "0123456789abcdef"[c & 15];
1190 static void memory_region_do_init(MemoryRegion *mr,
1195 mr->size = int128_make64(size);
1196 if (size == UINT64_MAX) {
1197 mr->size = int128_2_64();
1199 mr->name = g_strdup(name);
1201 mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE);
1202 mr->ram_block = NULL;
1205 char *escaped_name = memory_region_escape_name(name);
1206 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1209 owner = container_get(qdev_get_machine(), "/unattached");
1212 object_property_add_child(owner, name_array, OBJECT(mr));
1213 object_unref(OBJECT(mr));
1215 g_free(escaped_name);
1219 void memory_region_init(MemoryRegion *mr,
1224 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1225 memory_region_do_init(mr, owner, name, size);
1228 static void memory_region_get_container(Object *obj, Visitor *v,
1229 const char *name, void *opaque,
1232 MemoryRegion *mr = MEMORY_REGION(obj);
1233 char *path = (char *)"";
1235 if (mr->container) {
1236 path = object_get_canonical_path(OBJECT(mr->container));
1238 visit_type_str(v, name, &path, errp);
1239 if (mr->container) {
1244 static Object *memory_region_resolve_container(Object *obj, void *opaque,
1247 MemoryRegion *mr = MEMORY_REGION(obj);
1249 return OBJECT(mr->container);
1252 static void memory_region_get_priority(Object *obj, Visitor *v,
1253 const char *name, void *opaque,
1256 MemoryRegion *mr = MEMORY_REGION(obj);
1257 int32_t value = mr->priority;
1259 visit_type_int32(v, name, &value, errp);
1262 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1263 void *opaque, Error **errp)
1265 MemoryRegion *mr = MEMORY_REGION(obj);
1266 uint64_t value = memory_region_size(mr);
1268 visit_type_uint64(v, name, &value, errp);
1271 static void memory_region_initfn(Object *obj)
1273 MemoryRegion *mr = MEMORY_REGION(obj);
1276 mr->ops = &unassigned_mem_ops;
1278 mr->romd_mode = true;
1279 mr->destructor = memory_region_destructor_none;
1280 QTAILQ_INIT(&mr->subregions);
1281 QTAILQ_INIT(&mr->coalesced);
1283 op = object_property_add(OBJECT(mr), "container",
1284 "link<" TYPE_MEMORY_REGION ">",
1285 memory_region_get_container,
1286 NULL, /* memory_region_set_container */
1288 op->resolve = memory_region_resolve_container;
1290 object_property_add_uint64_ptr(OBJECT(mr), "addr",
1291 &mr->addr, OBJ_PROP_FLAG_READ);
1292 object_property_add(OBJECT(mr), "priority", "uint32",
1293 memory_region_get_priority,
1294 NULL, /* memory_region_set_priority */
1296 object_property_add(OBJECT(mr), "size", "uint64",
1297 memory_region_get_size,
1298 NULL, /* memory_region_set_size, */
1302 static void iommu_memory_region_initfn(Object *obj)
1304 MemoryRegion *mr = MEMORY_REGION(obj);
1306 mr->is_iommu = true;
1309 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1312 #ifdef DEBUG_UNASSIGNED
1313 printf("Unassigned mem read " HWADDR_FMT_plx "\n", addr);
1318 static void unassigned_mem_write(void *opaque, hwaddr addr,
1319 uint64_t val, unsigned size)
1321 #ifdef DEBUG_UNASSIGNED
1322 printf("Unassigned mem write " HWADDR_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1326 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1327 unsigned size, bool is_write,
1333 const MemoryRegionOps unassigned_mem_ops = {
1334 .valid.accepts = unassigned_mem_accepts,
1335 .endianness = DEVICE_NATIVE_ENDIAN,
1338 static uint64_t memory_region_ram_device_read(void *opaque,
1339 hwaddr addr, unsigned size)
1341 MemoryRegion *mr = opaque;
1342 uint64_t data = (uint64_t)~0;
1346 data = *(uint8_t *)(mr->ram_block->host + addr);
1349 data = *(uint16_t *)(mr->ram_block->host + addr);
1352 data = *(uint32_t *)(mr->ram_block->host + addr);
1355 data = *(uint64_t *)(mr->ram_block->host + addr);
1359 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1364 static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1365 uint64_t data, unsigned size)
1367 MemoryRegion *mr = opaque;
1369 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1373 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1376 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1379 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1382 *(uint64_t *)(mr->ram_block->host + addr) = data;
1387 static const MemoryRegionOps ram_device_mem_ops = {
1388 .read = memory_region_ram_device_read,
1389 .write = memory_region_ram_device_write,
1390 .endianness = DEVICE_HOST_ENDIAN,
1392 .min_access_size = 1,
1393 .max_access_size = 8,
1397 .min_access_size = 1,
1398 .max_access_size = 8,
1403 bool memory_region_access_valid(MemoryRegion *mr,
1409 if (mr->ops->valid.accepts
1410 && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) {
1411 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1412 ", size %u, region '%s', reason: rejected\n",
1413 is_write ? "write" : "read",
1414 addr, size, memory_region_name(mr));
1418 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1419 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1420 ", size %u, region '%s', reason: unaligned\n",
1421 is_write ? "write" : "read",
1422 addr, size, memory_region_name(mr));
1426 /* Treat a max_access_size of zero as "all access sizes valid", for compatibility */
1427 if (!mr->ops->valid.max_access_size) {
1431 if (size > mr->ops->valid.max_access_size
1432 || size < mr->ops->valid.min_access_size) {
1433 qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX
1434 ", size %u, region '%s', reason: invalid size "
1435 "(min:%u max:%u)\n",
1436 is_write ? "write" : "read",
1437 addr, size, memory_region_name(mr),
1438 mr->ops->valid.min_access_size,
1439 mr->ops->valid.max_access_size);
1445 static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1453 if (mr->ops->read) {
1454 return access_with_adjusted_size(addr, pval, size,
1455 mr->ops->impl.min_access_size,
1456 mr->ops->impl.max_access_size,
1457 memory_region_read_accessor,
1460 return access_with_adjusted_size(addr, pval, size,
1461 mr->ops->impl.min_access_size,
1462 mr->ops->impl.max_access_size,
1463 memory_region_read_with_attrs_accessor,
1468 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1474 unsigned size = memop_size(op);
1478 return memory_region_dispatch_read(mr->alias,
1479 mr->alias_offset + addr,
1482 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1483 *pval = unassigned_mem_read(mr, addr, size);
1484 return MEMTX_DECODE_ERROR;
1487 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1488 adjust_endianness(mr, pval, op);
1492 /* Return true if an eventfd was signalled */
1493 static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1499 MemoryRegionIoeventfd ioeventfd = {
1500 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1505 for (i = 0; i < mr->ioeventfd_nb; i++) {
1506 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1507 ioeventfd.e = mr->ioeventfds[i].e;
1509 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1510 event_notifier_set(ioeventfd.e);
1518 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1524 unsigned size = memop_size(op);
1527 return memory_region_dispatch_write(mr->alias,
1528 mr->alias_offset + addr,
1531 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1532 unassigned_mem_write(mr, addr, data, size);
1533 return MEMTX_DECODE_ERROR;
1536 adjust_endianness(mr, &data, op);
1539 * FIXME: it's not clear why under KVM the write would be processed
1540 * directly, instead of going through eventfd. This probably should
1541 * test "tcg_enabled() || qtest_enabled()", or should just go away.
1543 if (!kvm_enabled() &&
1544 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1548 if (mr->ops->write) {
1549 return access_with_adjusted_size(addr, &data, size,
1550 mr->ops->impl.min_access_size,
1551 mr->ops->impl.max_access_size,
1552 memory_region_write_accessor, mr,
1556 access_with_adjusted_size(addr, &data, size,
1557 mr->ops->impl.min_access_size,
1558 mr->ops->impl.max_access_size,
1559 memory_region_write_with_attrs_accessor,
1564 void memory_region_init_io(MemoryRegion *mr,
1566 const MemoryRegionOps *ops,
1571 memory_region_init(mr, owner, name, size);
1572 mr->ops = ops ? ops : &unassigned_mem_ops;
1573 mr->opaque = opaque;
1574 mr->terminates = true;
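/*
 * Usage sketch (illustrative, hypothetical device): backing a 4KiB MMIO
 * window with read/write callbacks and mapping it into system memory.  The
 * "demo" names, the register behaviour and the address are all assumptions.
 */
static uint64_t demo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* every register reads as zero in this sketch */
}

static void demo_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                            unsigned size)
{
    /* writes are ignored in this sketch */
}

static const MemoryRegionOps demo_mmio_ops = {
    .read = demo_mmio_read,
    .write = demo_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
};

static G_GNUC_UNUSED void demo_mmio_map(DeviceState *dev, MemoryRegion *sysmem)
{
    MemoryRegion *mr = g_new0(MemoryRegion, 1);

    memory_region_init_io(mr, OBJECT(dev), &demo_mmio_ops, dev,
                          "demo-mmio", 0x1000);
    memory_region_add_subregion(sysmem, 0xfe000000, mr);
}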
1577 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1583 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
1586 void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
1594 memory_region_init(mr, owner, name, size);
1596 mr->terminates = true;
1597 mr->destructor = memory_region_destructor_ram;
1598 mr->ram_block = qemu_ram_alloc(size, ram_flags, mr, &err);
1600 mr->size = int128_zero();
1601 object_unparent(OBJECT(mr));
1602 error_propagate(errp, err);
1606 void memory_region_init_resizeable_ram(MemoryRegion *mr,
1611 void (*resized)(const char*,
1617 memory_region_init(mr, owner, name, size);
1619 mr->terminates = true;
1620 mr->destructor = memory_region_destructor_ram;
1621 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1624 mr->size = int128_zero();
1625 object_unparent(OBJECT(mr));
1626 error_propagate(errp, err);
1631 void memory_region_init_ram_from_file(MemoryRegion *mr,
1642 memory_region_init(mr, owner, name, size);
1644 mr->readonly = !!(ram_flags & RAM_READONLY);
1645 mr->terminates = true;
1646 mr->destructor = memory_region_destructor_ram;
1648 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path,
1651 mr->size = int128_zero();
1652 object_unparent(OBJECT(mr));
1653 error_propagate(errp, err);
1657 void memory_region_init_ram_from_fd(MemoryRegion *mr,
1667 memory_region_init(mr, owner, name, size);
1669 mr->readonly = !!(ram_flags & RAM_READONLY);
1670 mr->terminates = true;
1671 mr->destructor = memory_region_destructor_ram;
1672 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, ram_flags, fd, offset,
1675 mr->size = int128_zero();
1676 object_unparent(OBJECT(mr));
1677 error_propagate(errp, err);
1682 void memory_region_init_ram_ptr(MemoryRegion *mr,
1688 memory_region_init(mr, owner, name, size);
1690 mr->terminates = true;
1691 mr->destructor = memory_region_destructor_ram;
1693 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1694 assert(ptr != NULL);
1695 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1698 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1704 memory_region_init(mr, owner, name, size);
1706 mr->terminates = true;
1707 mr->ram_device = true;
1708 mr->ops = &ram_device_mem_ops;
1710 mr->destructor = memory_region_destructor_ram;
1712 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1713 assert(ptr != NULL);
1714 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1717 void memory_region_init_alias(MemoryRegion *mr,
1724 memory_region_init(mr, owner, name, size);
1726 mr->alias_offset = offset;
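/*
 * Usage sketch (illustrative): exposing the first 1MiB of a RAM region at a
 * second guest-physical address through an alias.  Names and addresses are
 * assumptions.
 */
static G_GNUC_UNUSED void alias_example(Object *owner, MemoryRegion *sysmem,
                                        MemoryRegion *ram)
{
    MemoryRegion *alias = g_new0(MemoryRegion, 1);

    memory_region_init_alias(alias, owner, "ram-low-alias", ram,
                             0, 0x100000);
    memory_region_add_subregion_overlap(sysmem, 0xe0000000, alias, 1);
}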
1729 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1735 memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp);
1736 mr->readonly = true;
1739 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1741 const MemoryRegionOps *ops,
1749 memory_region_init(mr, owner, name, size);
1751 mr->opaque = opaque;
1752 mr->terminates = true;
1753 mr->rom_device = true;
1754 mr->destructor = memory_region_destructor_ram;
1755 mr->ram_block = qemu_ram_alloc(size, 0, mr, &err);
1757 mr->size = int128_zero();
1758 object_unparent(OBJECT(mr));
1759 error_propagate(errp, err);
1763 void memory_region_init_iommu(void *_iommu_mr,
1764 size_t instance_size,
1765 const char *mrtypename,
1770 struct IOMMUMemoryRegion *iommu_mr;
1771 struct MemoryRegion *mr;
1773 object_initialize(_iommu_mr, instance_size, mrtypename);
1774 mr = MEMORY_REGION(_iommu_mr);
1775 memory_region_do_init(mr, owner, name, size);
1776 iommu_mr = IOMMU_MEMORY_REGION(mr);
1777 mr->terminates = true; /* then re-forwards */
1778 QLIST_INIT(&iommu_mr->iommu_notify);
1779 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1782 static void memory_region_finalize(Object *obj)
1784 MemoryRegion *mr = MEMORY_REGION(obj);
1786 assert(!mr->container);
1788 /* We know the region is not visible in any address space (it
1789 * does not have a container and cannot be a root either because
1790 * it has no references), so we can blindly clear mr->enabled.
1791 * Calling memory_region_set_enabled() instead could trigger a
1792 * transaction and cause an infinite loop.
1794 mr->enabled = false;
1795 memory_region_transaction_begin();
1796 while (!QTAILQ_EMPTY(&mr->subregions)) {
1797 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1798 memory_region_del_subregion(mr, subregion);
1800 memory_region_transaction_commit();
1803 memory_region_clear_coalescing(mr);
1804 g_free((char *)mr->name);
1805 g_free(mr->ioeventfds);
1808 Object *memory_region_owner(MemoryRegion *mr)
1810 Object *obj = OBJECT(mr);
1814 void memory_region_ref(MemoryRegion *mr)
1816 /* MMIO callbacks most likely will access data that belongs
1817 * to the owner, hence the need to ref/unref the owner whenever
1818 * the memory region is in use.
1820 * The memory region is a child of its owner. As long as the
1821 * owner doesn't call unparent itself on the memory region,
1822 * ref-ing the owner will also keep the memory region alive.
1823 * Memory regions without an owner are supposed to never go away;
1824 * we do not ref/unref them because it would slow down DMA noticeably.
1826 if (mr && mr->owner) {
1827 object_ref(mr->owner);
1831 void memory_region_unref(MemoryRegion *mr)
1833 if (mr && mr->owner) {
1834 object_unref(mr->owner);
1838 uint64_t memory_region_size(MemoryRegion *mr)
1840 if (int128_eq(mr->size, int128_2_64())) {
1843 return int128_get64(mr->size);
1846 const char *memory_region_name(const MemoryRegion *mr)
1849 ((MemoryRegion *)mr)->name =
1850 g_strdup(object_get_canonical_path_component(OBJECT(mr)));
1855 bool memory_region_is_ram_device(MemoryRegion *mr)
1857 return mr->ram_device;
1860 bool memory_region_is_protected(MemoryRegion *mr)
1862 return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
1865 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1867 uint8_t mask = mr->dirty_log_mask;
1868 RAMBlock *rb = mr->ram_block;
1870 if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) ||
1871 memory_region_is_iommu(mr))) {
1872 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1875 if (tcg_enabled() && rb) {
1876 /* TCG only cares about dirty memory logging for RAM, not IOMMU. */
1877 mask |= (1 << DIRTY_MEMORY_CODE);
1882 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1884 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1887 static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
1890 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1891 IOMMUNotifier *iommu_notifier;
1892 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1895 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1896 flags |= iommu_notifier->notifier_flags;
1899 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1900 ret = imrc->notify_flag_changed(iommu_mr,
1901 iommu_mr->iommu_notify_flags,
1906 iommu_mr->iommu_notify_flags = flags;
1911 int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1912 uint64_t page_size_mask,
1915 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1918 if (imrc->iommu_set_page_size_mask) {
1919 ret = imrc->iommu_set_page_size_mask(iommu_mr, page_size_mask, errp);
1924 int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu_mr,
1928 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1931 if (imrc->iommu_set_iova_ranges) {
1932 ret = imrc->iommu_set_iova_ranges(iommu_mr, iova_ranges, errp);
1937 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1938 IOMMUNotifier *n, Error **errp)
1940 IOMMUMemoryRegion *iommu_mr;
1944 return memory_region_register_iommu_notifier(mr->alias, n, errp);
1947 /* We need to register for at least one bitfield */
1948 iommu_mr = IOMMU_MEMORY_REGION(mr);
1949 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1950 assert(n->start <= n->end);
1951 assert(n->iommu_idx >= 0 &&
1952 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1954 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1955 ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
1957 QLIST_REMOVE(n, node);
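/*
 * Usage sketch (illustrative): registering an UNMAP notifier that covers the
 * whole IOVA range of an IOMMU region.  The callback is a placeholder; real
 * users (e.g. vhost, VFIO) drop or update their cached translations here.
 */
static void example_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* react to the invalidation of [iotlb->iova, iotlb->iova + addr_mask] */
}

static G_GNUC_UNUSED int register_unmap_notifier(MemoryRegion *iommu_mr,
                                                 IOMMUNotifier *n,
                                                 Error **errp)
{
    iommu_notifier_init(n, example_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX, 0);
    return memory_region_register_iommu_notifier(iommu_mr, n, errp);
}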
1962 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
1964 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1966 if (imrc->get_min_page_size) {
1967 return imrc->get_min_page_size(iommu_mr);
1969 return TARGET_PAGE_SIZE;
1972 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
1974 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1975 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1976 hwaddr addr, granularity;
1977 IOMMUTLBEntry iotlb;
1979 /* If the IOMMU has its own replay callback, override */
1981 imrc->replay(iommu_mr, n);
1985 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
1987 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1988 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
1989 if (iotlb.perm != IOMMU_NONE) {
1990 n->notify(n, &iotlb);
1993 /* if (2^64 - MR size) < granularity, it's possible to get an
1994 * infinite loop here. This should catch such a wraparound */
1995 if ((addr + granularity) < addr) {
2001 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
2004 IOMMUMemoryRegion *iommu_mr;
2007 memory_region_unregister_iommu_notifier(mr->alias, n);
2010 QLIST_REMOVE(n, node);
2011 iommu_mr = IOMMU_MEMORY_REGION(mr);
2012 memory_region_update_iommu_notify_flags(iommu_mr, NULL);
2015 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
2016 IOMMUTLBEvent *event)
2018 IOMMUTLBEntry *entry = &event->entry;
2019 hwaddr entry_end = entry->iova + entry->addr_mask;
2020 IOMMUTLBEntry tmp = *entry;
2022 if (event->type == IOMMU_NOTIFIER_UNMAP) {
2023 assert(entry->perm == IOMMU_NONE);
2027 * Skip the notification if it does not overlap
2028 * with the registered range.
2030 if (notifier->start > entry_end || notifier->end < entry->iova) {
2034 if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
2035 /* Crop (iova, addr_mask) to range */
2036 tmp.iova = MAX(tmp.iova, notifier->start);
2037 tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova;
2039 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
2042 if (event->type & notifier->notifier_flags) {
2043 notifier->notify(notifier, &tmp);
2047 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier)
2049 IOMMUTLBEvent event;
2051 event.type = IOMMU_NOTIFIER_UNMAP;
2052 event.entry.target_as = &address_space_memory;
2053 event.entry.iova = notifier->start;
2054 event.entry.perm = IOMMU_NONE;
2055 event.entry.addr_mask = notifier->end - notifier->start;
2057 memory_region_notify_iommu_one(notifier, &event);
2060 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
2062 IOMMUTLBEvent event)
2064 IOMMUNotifier *iommu_notifier;
2066 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
2068 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
2069 if (iommu_notifier->iommu_idx == iommu_idx) {
2070 memory_region_notify_iommu_one(iommu_notifier, &event);
2075 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
2076 enum IOMMUMemoryRegionAttr attr,
2079 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2081 if (!imrc->get_attr) {
2085 return imrc->get_attr(iommu_mr, attr, data);
2088 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
2091 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2093 if (!imrc->attrs_to_index) {
2097 return imrc->attrs_to_index(iommu_mr, attrs);
2100 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2102 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2104 if (!imrc->num_indexes) {
2108 return imrc->num_indexes(iommu_mr);
2111 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr)
2113 if (!memory_region_is_ram(mr)) {
2119 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2120 RamDiscardManager *rdm)
2122 g_assert(memory_region_is_ram(mr));
2123 g_assert(!rdm || !mr->rdm);
2127 uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
2128 const MemoryRegion *mr)
2130 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2132 g_assert(rdmc->get_min_granularity);
2133 return rdmc->get_min_granularity(rdm, mr);
2136 bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
2137 const MemoryRegionSection *section)
2139 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2141 g_assert(rdmc->is_populated);
2142 return rdmc->is_populated(rdm, section);
2145 int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
2146 MemoryRegionSection *section,
2147 ReplayRamPopulate replay_fn,
2150 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2152 g_assert(rdmc->replay_populated);
2153 return rdmc->replay_populated(rdm, section, replay_fn, opaque);
2156 void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
2157 MemoryRegionSection *section,
2158 ReplayRamDiscard replay_fn,
2161 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2163 g_assert(rdmc->replay_discarded);
2164 rdmc->replay_discarded(rdm, section, replay_fn, opaque);
2167 void ram_discard_manager_register_listener(RamDiscardManager *rdm,
2168 RamDiscardListener *rdl,
2169 MemoryRegionSection *section)
2171 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2173 g_assert(rdmc->register_listener);
2174 rdmc->register_listener(rdm, rdl, section);
2177 void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
2178 RamDiscardListener *rdl)
2180 RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm);
2182 g_assert(rdmc->unregister_listener);
2183 rdmc->unregister_listener(rdm, rdl);
2186 /* Called with rcu_read_lock held. */
2187 bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
2188 ram_addr_t *ram_addr, bool *read_only,
2189 bool *mr_has_discard_manager)
2193 hwaddr len = iotlb->addr_mask + 1;
2194 bool writable = iotlb->perm & IOMMU_WO;
2196 if (mr_has_discard_manager) {
2197 *mr_has_discard_manager = false;
2200 * The IOMMU TLB entry we have just covers translation through
2201 * this IOMMU to its immediate target. We need to translate
2202 * it the rest of the way through to memory.
2204 mr = address_space_translate(&address_space_memory, iotlb->translated_addr,
2205 &xlat, &len, writable, MEMTXATTRS_UNSPECIFIED);
2206 if (!memory_region_is_ram(mr)) {
2207 error_report("iommu map to non memory area %" HWADDR_PRIx "", xlat);
2209 } else if (memory_region_has_ram_discard_manager(mr)) {
2210 RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
2211 MemoryRegionSection tmp = {
2213 .offset_within_region = xlat,
2214 .size = int128_make64(len),
2216 if (mr_has_discard_manager) {
2217 *mr_has_discard_manager = true;
2220 * Malicious VMs can map memory into the IOMMU, which is expected
2221 * to remain discarded. vfio will pin all pages, populating memory.
2222 * Disallow that. vmstate priorities make sure any RamDiscardManager
2223 * was already restored before IOMMUs are restored.
2225 if (!ram_discard_manager_is_populated(rdm, &tmp)) {
2226 error_report("iommu map to discarded memory (e.g., unplugged via"
2227 " virtio-mem): %" HWADDR_PRIx "",
2228 iotlb->translated_addr);
2234 * Translation truncates length to the IOMMU page size,
2235 * check that it did not truncate too much.
2237 if (len & iotlb->addr_mask) {
2238 error_report("iommu has granularity incompatible with target AS");
2243 *vaddr = memory_region_get_ram_ptr(mr) + xlat;
2247 *ram_addr = memory_region_get_ram_addr(mr) + xlat;
2251 *read_only = !writable || mr->readonly;
2257 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2259 uint8_t mask = 1 << client;
2260 uint8_t old_logging;
2262 assert(client == DIRTY_MEMORY_VGA);
2263 old_logging = mr->vga_logging_count;
2264 mr->vga_logging_count += log ? 1 : -1;
2265 if (!!old_logging == !!mr->vga_logging_count) {
2269 memory_region_transaction_begin();
2270 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2271 memory_region_update_pending |= mr->enabled;
2272 memory_region_transaction_commit();
2275 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2278 assert(mr->ram_block);
2279 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2281 memory_region_get_dirty_log_mask(mr));
2285 * If memory region `mr' is NULL, do global sync. Otherwise, sync
2286 * dirty bitmap for the specified memory region.
2288 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr, bool last_stage)
2290 MemoryListener *listener;
2295 /* If the same address space has multiple log_sync listeners, we
2296 * visit that address space's FlatView multiple times. But because
2297 * log_sync listeners are rare, it's still cheaper than walking each
2298 * address space once.
2300 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2301 if (listener->log_sync) {
2302 as = listener->address_space;
2303 view = address_space_get_flatview(as);
2304 FOR_EACH_FLAT_RANGE(fr, view) {
2305 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2306 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2307 listener->log_sync(listener, &mrs);
2310 flatview_unref(view);
2311 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0);
2312 } else if (listener->log_sync_global) {
2314 * Whether or not an MR is specified, all we can do here
2315 * is a global sync, because this listener cannot sync
2316 * at a finer granularity.
2318 listener->log_sync_global(listener, last_stage);
2319 trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1);
2324 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2327 MemoryRegionSection mrs;
2328 MemoryListener *listener;
2332 hwaddr sec_start, sec_end, sec_size;
2334 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2335 if (!listener->log_clear) {
2338 as = listener->address_space;
2339 view = address_space_get_flatview(as);
2340 FOR_EACH_FLAT_RANGE(fr, view) {
2341 if (!fr->dirty_log_mask || fr->mr != mr) {
2343 * The clear-dirty-bitmap operation only applies to
2344 * regions that have dirty logging enabled
2349 mrs = section_from_flat_range(fr, view);
2351 sec_start = MAX(mrs.offset_within_region, start);
2352 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2353 sec_end = MIN(sec_end, start + len);
2355 if (sec_start >= sec_end) {
2357 * If this memory region section has no intersection
2358 * with the requested range, skip.
2363 /* Valid case; shrink the section if needed */
2364 mrs.offset_within_address_space +=
2365 sec_start - mrs.offset_within_region;
2366 mrs.offset_within_region = sec_start;
2367 sec_size = sec_end - sec_start;
2368 mrs.size = int128_make64(sec_size);
2369 listener->log_clear(listener, &mrs);
2371 flatview_unref(view);
2375 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2380 DirtyBitmapSnapshot *snapshot;
2381 assert(mr->ram_block);
2382 memory_region_sync_dirty_bitmap(mr, false);
2383 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2384 memory_global_after_dirty_log_sync();
2388 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2389 hwaddr addr, hwaddr size)
2391 assert(mr->ram_block);
2392 return cpu_physical_memory_snapshot_get_dirty(snap,
2393 memory_region_get_ram_addr(mr) + addr, size);
2396 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2398 if (mr->readonly != readonly) {
2399 memory_region_transaction_begin();
2400 mr->readonly = readonly;
2401 memory_region_update_pending |= mr->enabled;
2402 memory_region_transaction_commit();
2406 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2408 if (mr->nonvolatile != nonvolatile) {
2409 memory_region_transaction_begin();
2410 mr->nonvolatile = nonvolatile;
2411 memory_region_update_pending |= mr->enabled;
2412 memory_region_transaction_commit();
2416 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2418 if (mr->romd_mode != romd_mode) {
2419 memory_region_transaction_begin();
2420 mr->romd_mode = romd_mode;
2421 memory_region_update_pending |= mr->enabled;
2422 memory_region_transaction_commit();
2426 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2427 hwaddr size, unsigned client)
2429 assert(mr->ram_block);
2430 cpu_physical_memory_test_and_clear_dirty(
2431 memory_region_get_ram_addr(mr) + addr, size, client);
2434 int memory_region_get_fd(MemoryRegion *mr)
2436 RCU_READ_LOCK_GUARD();
2440 return mr->ram_block->fd;
2443 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2445 uint64_t offset = 0;
2447 RCU_READ_LOCK_GUARD();
2449 offset += mr->alias_offset;
2452 assert(mr->ram_block);
2453 return qemu_map_ram_ptr(mr->ram_block, offset);
2456 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2460 block = qemu_ram_block_from_host(ptr, false, offset);
2468 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2470 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2473 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2475 assert(mr->ram_block);
2477 qemu_ram_resize(mr->ram_block, newsize, errp);
2480 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size)
2482 if (mr->ram_block) {
2483 qemu_ram_msync(mr->ram_block, addr, size);
2487 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
2490 * This might need to be extended to cover
2491 * different types of memory regions.
2493 if (mr->dirty_log_mask) {
2494 memory_region_msync(mr, addr, size);
2499 * Notify the proper memory listeners about the change to the newly
2500 * added/removed CoalescedMemoryRange.
2502 static void memory_region_update_coalesced_range(MemoryRegion *mr,
2503 CoalescedMemoryRange *cmr,
2510 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2511 view = address_space_get_flatview(as);
2512 FOR_EACH_FLAT_RANGE(fr, view) {
2514 flat_range_coalesced_io_notify(fr, as, cmr, add);
2517 flatview_unref(view);
2521 void memory_region_set_coalescing(MemoryRegion *mr)
2523 memory_region_clear_coalescing(mr);
2524 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2527 void memory_region_add_coalescing(MemoryRegion *mr,
2531 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2533 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2534 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2535 memory_region_update_coalesced_range(mr, cmr, true);
2536 memory_region_set_flush_coalesced(mr);
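/*
 * Usage sketch (illustrative): marking a framebuffer-style region as
 * coalesced MMIO so that accelerators such as KVM can batch guest writes
 * instead of exiting on every access.  Offset and size are assumptions.
 */
static G_GNUC_UNUSED void coalesce_framebuffer_example(MemoryRegion *fb_mr)
{
    /* Coalesce writes to the first 64KiB of the region. */
    memory_region_add_coalescing(fb_mr, 0, 0x10000);
}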
2539 void memory_region_clear_coalescing(MemoryRegion *mr)
2541 CoalescedMemoryRange *cmr;
2543 if (QTAILQ_EMPTY(&mr->coalesced)) {
2547 qemu_flush_coalesced_mmio_buffer();
2548 mr->flush_coalesced_mmio = false;
2550 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2551 cmr = QTAILQ_FIRST(&mr->coalesced);
2552 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2553 memory_region_update_coalesced_range(mr, cmr, false);
2558 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2560 mr->flush_coalesced_mmio = true;
2563 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2565 qemu_flush_coalesced_mmio_buffer();
2566 if (QTAILQ_EMPTY(&mr->coalesced)) {
2567 mr->flush_coalesced_mmio = false;
2571 void memory_region_add_eventfd(MemoryRegion *mr,
2578 MemoryRegionIoeventfd mrfd = {
2579 .addr.start = int128_make64(addr),
2580 .addr.size = int128_make64(size),
2581 .match_data = match_data,
2588 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2590 memory_region_transaction_begin();
2591 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2592 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2597 mr->ioeventfds = g_realloc(mr->ioeventfds,
2598 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2599 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2600 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2601 mr->ioeventfds[i] = mrfd;
2602 ioeventfd_update_pending |= mr->enabled;
2603 memory_region_transaction_commit();
2606 void memory_region_del_eventfd(MemoryRegion *mr,
2613 MemoryRegionIoeventfd mrfd = {
2614 .addr.start = int128_make64(addr),
2615 .addr.size = int128_make64(size),
2616 .match_data = match_data,
2623 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2625 memory_region_transaction_begin();
2626 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2627 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2631 assert(i != mr->ioeventfd_nb);
2632 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2633 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2635 mr->ioeventfds = g_realloc(mr->ioeventfds,
2636 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2637 ioeventfd_update_pending |= mr->enabled;
2638 memory_region_transaction_commit();
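/*
 * Illustrative sketch (not part of the original file): virtio-style devices
 * register an ioeventfd on their queue-notify register so that a guest
 * write becomes an EventNotifier signal instead of a synchronous MMIO
 * exit.  QUEUE_NOTIFY_OFFSET, vq_index and the notifier are hypothetical.
 *
 *     memory_region_add_eventfd(&s->mmio, QUEUE_NOTIFY_OFFSET, 2,
 *                               true, vq_index, &s->notifier);
 *     ...
 *     memory_region_del_eventfd(&s->mmio, QUEUE_NOTIFY_OFFSET, 2,
 *                               true, vq_index, &s->notifier);
 *
 * Add and delete must use the same (addr, size, match_data, data, fd)
 * tuple; the delete path asserts if no matching entry is found.
 */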
2641 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2643 MemoryRegion *mr = subregion->container;
2644 MemoryRegion *other;
2646 memory_region_transaction_begin();
2648 memory_region_ref(subregion);
2649 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2650 if (subregion->priority >= other->priority) {
2651 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2655 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2657 memory_region_update_pending |= mr->enabled && subregion->enabled;
2658 memory_region_transaction_commit();
2661 static void memory_region_add_subregion_common(MemoryRegion *mr,
2663 MemoryRegion *subregion)
2665 MemoryRegion *alias;
2667 assert(!subregion->container);
2668 subregion->container = mr;
2669 for (alias = subregion->alias; alias; alias = alias->alias) {
2670 alias->mapped_via_alias++;
2672 subregion->addr = offset;
2673 memory_region_update_container_subregions(subregion);
2676 void memory_region_add_subregion(MemoryRegion *mr,
2678 MemoryRegion *subregion)
2680 subregion->priority = 0;
2681 memory_region_add_subregion_common(mr, offset, subregion);
2684 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2686 MemoryRegion *subregion,
2689 subregion->priority = priority;
2690 memory_region_add_subregion_common(mr, offset, subregion);
2693 void memory_region_del_subregion(MemoryRegion *mr,
2694 MemoryRegion *subregion)
2696 MemoryRegion *alias;
2698 memory_region_transaction_begin();
2699 assert(subregion->container == mr);
2700 subregion->container = NULL;
2701 for (alias = subregion->alias; alias; alias = alias->alias) {
2702 alias->mapped_via_alias--;
2703 assert(alias->mapped_via_alias >= 0);
2705 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2706 memory_region_unref(subregion);
2707 memory_region_update_pending |= mr->enabled && subregion->enabled;
2708 memory_region_transaction_commit();
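/*
 * Illustrative sketch (not part of the original file): building a small
 * hierarchy with and without overlap.  All names are hypothetical.
 *
 *     memory_region_init(&s->container, OBJECT(s), "chip", 0x10000);
 *     memory_region_init_io(&s->regs, OBJECT(s), &reg_ops, s, "regs", 0x100);
 *     memory_region_add_subregion(&s->container, 0x0, &s->regs);
 *
 *     // an overlapping region with higher priority hides whatever sits
 *     // underneath it in the overlapping range
 *     memory_region_add_subregion_overlap(&s->container, 0x0, &s->shadow, 1);
 *
 * memory_region_del_subregion() drops the reference that was taken when the
 * subregion was added.
 */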
2711 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2713 if (enabled == mr->enabled) {
2716 memory_region_transaction_begin();
2717 mr->enabled = enabled;
2718 memory_region_update_pending = true;
2719 memory_region_transaction_commit();
2722 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2724 Int128 s = int128_make64(size);
2726 if (size == UINT64_MAX) {
2729 if (int128_eq(s, mr->size)) {
2732 memory_region_transaction_begin();
2734 memory_region_update_pending = true;
2735 memory_region_transaction_commit();
2738 static void memory_region_readd_subregion(MemoryRegion *mr)
2740 MemoryRegion *container = mr->container;
2743 memory_region_transaction_begin();
2744 memory_region_ref(mr);
2745 memory_region_del_subregion(container, mr);
2746 memory_region_add_subregion_common(container, mr->addr, mr);
2747 memory_region_unref(mr);
2748 memory_region_transaction_commit();
2752 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2754 if (addr != mr->addr) {
2756 memory_region_readd_subregion(mr);
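/*
 * Illustrative sketch (not part of the original file): PCI-BAR-style
 * mapping is normally implemented by moving and toggling a long-lived
 * region rather than recreating it.  The names below are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_address(&s->bar_mr, new_base);
 *     memory_region_set_enabled(&s->bar_mr, cmd & PCI_COMMAND_MEMORY);
 *     memory_region_transaction_commit();
 */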
2760 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2764 if (offset == mr->alias_offset) {
2768 memory_region_transaction_begin();
2769 mr->alias_offset = offset;
2770 memory_region_update_pending |= mr->enabled;
2771 memory_region_transaction_commit();
2774 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable)
2776 if (unmergeable == mr->unmergeable) {
2780 memory_region_transaction_begin();
2781 mr->unmergeable = unmergeable;
2782 memory_region_update_pending |= mr->enabled;
2783 memory_region_transaction_commit();
2786 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2791 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2793 const AddrRange *addr = addr_;
2794 const FlatRange *fr = fr_;
2796 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2798 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2804 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2806 return bsearch(&addr, view->ranges, view->nr,
2807 sizeof(FlatRange), cmp_flatrange_addr);
2810 bool memory_region_is_mapped(MemoryRegion *mr)
2812 return !!mr->container || mr->mapped_via_alias;
2815 /* Same as memory_region_find, but it does not add a reference to the
2816 * returned region. It must be called from an RCU critical section.
2818 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2819 hwaddr addr, uint64_t size)
2821 MemoryRegionSection ret = { .mr = NULL };
2829 for (root = mr; root->container; ) {
2830 root = root->container;
2834 as = memory_region_to_address_space(root);
2838 range = addrrange_make(int128_make64(addr), int128_make64(size));
2840 view = address_space_to_flatview(as);
2841 fr = flatview_lookup(view, range);
2846 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2852 range = addrrange_intersection(range, fr->addr);
2853 ret.offset_within_region = fr->offset_in_region;
2854 ret.offset_within_region += int128_get64(int128_sub(range.start,
2856 ret.size = range.size;
2857 ret.offset_within_address_space = int128_get64(range.start);
2858 ret.readonly = fr->readonly;
2859 ret.nonvolatile = fr->nonvolatile;
2863 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2864 hwaddr addr, uint64_t size)
2866 MemoryRegionSection ret;
2867 RCU_READ_LOCK_GUARD();
2868 ret = memory_region_find_rcu(mr, addr, size);
2870 memory_region_ref(ret.mr);
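/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * to know what is mapped at a guest-physical address uses
 * memory_region_find() and must drop the reference it returns.  The
 * address is just an example.
 *
 *     MemoryRegionSection sec = memory_region_find(get_system_memory(),
 *                                                  0xfee00000, 4);
 *     if (sec.mr) {
 *         // inspect sec.offset_within_region, sec.size, sec.readonly, ...
 *         memory_region_unref(sec.mr);
 *     }
 */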
2875 MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s)
2877 MemoryRegionSection *tmp = g_new(MemoryRegionSection, 1);
2881 memory_region_ref(tmp->mr);
2884 bool ret = flatview_ref(tmp->fv);
2891 void memory_region_section_free_copy(MemoryRegionSection *s)
2894 flatview_unref(s->fv);
2897 memory_region_unref(s->mr);
2902 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2906 RCU_READ_LOCK_GUARD();
2907 mr = memory_region_find_rcu(container, addr, 1).mr;
2908 return mr && mr != container;
2911 void memory_global_dirty_log_sync(bool last_stage)
2913 memory_region_sync_dirty_bitmap(NULL, last_stage);
2916 void memory_global_after_dirty_log_sync(void)
2918 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2922 * Dirty-tracking stop flags that have been postponed because the VM is
2923 * stopped.  Should only be used from within the vmstate_change hook.
2925 static unsigned int postponed_stop_flags;
2926 static VMChangeStateEntry *vmstate_change;
2927 static void memory_global_dirty_log_stop_postponed_run(void);
2929 void memory_global_dirty_log_start(unsigned int flags)
2931 unsigned int old_flags;
2933 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
2935 if (vmstate_change) {
2936 /* If there is a postponed stop(), handle it first */
2937 postponed_stop_flags &= ~flags;
2938 memory_global_dirty_log_stop_postponed_run();
2941 flags &= ~global_dirty_tracking;
2946 old_flags = global_dirty_tracking;
2947 global_dirty_tracking |= flags;
2948 trace_global_dirty_changed(global_dirty_tracking);
2951 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2952 memory_region_transaction_begin();
2953 memory_region_update_pending = true;
2954 memory_region_transaction_commit();
2958 static void memory_global_dirty_log_do_stop(unsigned int flags)
2960 assert(flags && !(flags & (~GLOBAL_DIRTY_MASK)));
2961 assert((global_dirty_tracking & flags) == flags);
2962 global_dirty_tracking &= ~flags;
2964 trace_global_dirty_changed(global_dirty_tracking);
2966 if (!global_dirty_tracking) {
2967 memory_region_transaction_begin();
2968 memory_region_update_pending = true;
2969 memory_region_transaction_commit();
2970 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2975 * Execute the postponed dirty log stop operations, if any, then reset
2976 * everything (including the flags and the vmstate change hook).
2978 static void memory_global_dirty_log_stop_postponed_run(void)
2980 /* This must be called with the vmstate handler registered */
2981 assert(vmstate_change);
2983 /* Note: postponed_stop_flags can be cleared in log start routine */
2984 if (postponed_stop_flags) {
2985 memory_global_dirty_log_do_stop(postponed_stop_flags);
2986 postponed_stop_flags = 0;
2989 qemu_del_vm_change_state_handler(vmstate_change);
2990 vmstate_change = NULL;
2993 static void memory_vm_change_state_handler(void *opaque, bool running,
2997 memory_global_dirty_log_stop_postponed_run();
3001 void memory_global_dirty_log_stop(unsigned int flags)
3003 if (!runstate_is_running()) {
3004 /* Postpone the dirty log stop, e.g., to when VM starts again */
3005 if (vmstate_change) {
3006 /* Batch with previous postponed flags */
3007 postponed_stop_flags |= flags;
3009 postponed_stop_flags = flags;
3010 vmstate_change = qemu_add_vm_change_state_handler(
3011 memory_vm_change_state_handler, NULL);
3016 memory_global_dirty_log_do_stop(flags);
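/*
 * Illustrative sketch (not part of the original file): migration-style
 * users bracket their dirty-bitmap consumption with a global start/stop
 * pair, identifying themselves with one of the GLOBAL_DIRTY_* flag bits.
 *
 *     memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
 *     ...
 *     memory_global_dirty_log_sync(false);      // not the final pass yet
 *     ...
 *     memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION);
 *
 * If the VM happens to be stopped when stop is requested, the stop is
 * postponed and run from the vm-state change handler once the VM resumes.
 */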
3019 static void listener_add_address_space(MemoryListener *listener,
3025 if (listener->begin) {
3026 listener->begin(listener);
3028 if (global_dirty_tracking) {
3029 if (listener->log_global_start) {
3030 listener->log_global_start(listener);
3034 view = address_space_get_flatview(as);
3035 FOR_EACH_FLAT_RANGE(fr, view) {
3036 MemoryRegionSection section = section_from_flat_range(fr, view);
3038 if (listener->region_add) {
3039 listener->region_add(listener, &section);
3041 if (fr->dirty_log_mask && listener->log_start) {
3042 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
3045 if (listener->commit) {
3046 listener->commit(listener);
3048 flatview_unref(view);
3051 static void listener_del_address_space(MemoryListener *listener,
3057 if (listener->begin) {
3058 listener->begin(listener);
3060 view = address_space_get_flatview(as);
3061 FOR_EACH_FLAT_RANGE(fr, view) {
3062 MemoryRegionSection section = section_from_flat_range(fr, view);
3064 if (fr->dirty_log_mask && listener->log_stop) {
3065 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
3067 if (listener->region_del) {
3068 listener->region_del(listener, &section);
3071 if (listener->commit) {
3072 listener->commit(listener);
3074 flatview_unref(view);
3077 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
3079 MemoryListener *other = NULL;
3081 /* Only one of log_sync and log_sync_global may be set for a listener */
3082 assert(!(listener->log_sync && listener->log_sync_global));
3084 listener->address_space = as;
3085 if (QTAILQ_EMPTY(&memory_listeners)
3086 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
3087 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
3089 QTAILQ_FOREACH(other, &memory_listeners, link) {
3090 if (listener->priority < other->priority) {
3094 QTAILQ_INSERT_BEFORE(other, listener, link);
3097 if (QTAILQ_EMPTY(&as->listeners)
3098 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
3099 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
3101 QTAILQ_FOREACH(other, &as->listeners, link_as) {
3102 if (listener->priority < other->priority) {
3106 QTAILQ_INSERT_BEFORE(other, listener, link_as);
3109 listener_add_address_space(listener, as);
3111 if (listener->eventfd_add || listener->eventfd_del) {
3112 as->ioeventfd_notifiers++;
3116 void memory_listener_unregister(MemoryListener *listener)
3118 if (!listener->address_space) {
3122 if (listener->eventfd_add || listener->eventfd_del) {
3123 listener->address_space->ioeventfd_notifiers--;
3126 listener_del_address_space(listener, listener->address_space);
3127 QTAILQ_REMOVE(&memory_listeners, listener, link);
3128 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
3129 listener->address_space = NULL;
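/*
 * Illustrative sketch (not part of the original file): a minimal listener
 * that reports every range mapped into the system address space.  The
 * callback and priority value are hypothetical.
 *
 *     static void example_region_add(MemoryListener *l,
 *                                    MemoryRegionSection *sec)
 *     {
 *         qemu_printf("mapped %s\n", memory_region_name(sec->mr));
 *     }
 *
 *     static MemoryListener example_listener = {
 *         .region_add = example_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&example_listener, &address_space_memory);
 *
 * Registration replays region_add for every range already present in the
 * address space, so the listener immediately sees the current topology.
 */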
3132 void address_space_remove_listeners(AddressSpace *as)
3134 while (!QTAILQ_EMPTY(&as->listeners)) {
3135 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
3139 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
3141 memory_region_ref(root);
3143 as->current_map = NULL;
3144 as->ioeventfd_nb = 0;
3145 as->ioeventfds = NULL;
3146 QTAILQ_INIT(&as->listeners);
3147 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
3148 as->name = g_strdup(name ? name : "anonymous");
3149 address_space_update_topology(as);
3150 address_space_update_ioeventfds(as);
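/*
 * Illustrative sketch (not part of the original file): devices whose DMA
 * goes through a private view of memory (e.g. behind an IOMMU) wrap that
 * view in their own address space.  Names are hypothetical.
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "dev-dma-root", UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "dev-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);
 */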
3153 static void do_address_space_destroy(AddressSpace *as)
3155 assert(QTAILQ_EMPTY(&as->listeners));
3157 flatview_unref(as->current_map);
3159 g_free(as->ioeventfds);
3160 memory_region_unref(as->root);
3163 void address_space_destroy(AddressSpace *as)
3165 MemoryRegion *root = as->root;
3167 /* Flush out anything from MemoryListeners listening in on this */
3168 memory_region_transaction_begin();
3170 memory_region_transaction_commit();
3171 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
3173 /* At this point, as->dispatch and as->current_map are dummy
3174 * entries that the guest should never use. Wait for the old
3175 * values to expire before freeing the data.
3178 call_rcu(as, do_address_space_destroy, rcu);
3181 static const char *memory_region_type(MemoryRegion *mr)
3184 return memory_region_type(mr->alias);
3186 if (memory_region_is_ram_device(mr)) {
3188 } else if (memory_region_is_romd(mr)) {
3190 } else if (memory_region_is_rom(mr)) {
3192 } else if (memory_region_is_ram(mr)) {
3199 typedef struct MemoryRegionList MemoryRegionList;
3201 struct MemoryRegionList {
3202 const MemoryRegion *mr;
3203 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
3206 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
3208 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
3209 int128_sub((size), int128_one())) : 0)
3210 #define MTREE_INDENT " "
3212 static void mtree_expand_owner(const char *label, Object *obj)
3214 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
3216 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
3217 if (dev && dev->id) {
3218 qemu_printf(" id=%s", dev->id);
3220 char *canonical_path = object_get_canonical_path(obj);
3221 if (canonical_path) {
3222 qemu_printf(" path=%s", canonical_path);
3223 g_free(canonical_path);
3225 qemu_printf(" type=%s", object_get_typename(obj));
3231 static void mtree_print_mr_owner(const MemoryRegion *mr)
3233 Object *owner = mr->owner;
3234 Object *parent = memory_region_owner((MemoryRegion *)mr);
3236 if (!owner && !parent) {
3237 qemu_printf(" orphan");
3241 mtree_expand_owner("owner", owner);
3243 if (parent && parent != owner) {
3244 mtree_expand_owner("parent", parent);
3248 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
3250 MemoryRegionListHead *alias_print_queue,
3251 bool owner, bool display_disabled)
3253 MemoryRegionList *new_ml, *ml, *next_ml;
3254 MemoryRegionListHead submr_print_queue;
3255 const MemoryRegion *submr;
3257 hwaddr cur_start, cur_end;
3263 cur_start = base + mr->addr;
3264 cur_end = cur_start + MR_SIZE(mr->size);
3267 * Try to detect overflow of the memory region range.  This should
3268 * never happen normally; when it does, print a marker to warn the
3269 * user who is looking at the output.
3271 if (cur_start < base || cur_end < cur_start) {
3272 qemu_printf("[DETECTED OVERFLOW!] ");
3278 /* check if the alias is already in the queue */
3279 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
3280 if (ml->mr == mr->alias) {
3286 ml = g_new(MemoryRegionList, 1);
3288 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
3290 if (mr->enabled || display_disabled) {
3291 for (i = 0; i < level; i++) {
3292 qemu_printf(MTREE_INDENT);
3294 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3295 " (prio %d, %s%s): alias %s @%s " HWADDR_FMT_plx
3296 "-" HWADDR_FMT_plx "%s",
3299 mr->nonvolatile ? "nv-" : "",
3300 memory_region_type((MemoryRegion *)mr),
3301 memory_region_name(mr),
3302 memory_region_name(mr->alias),
3304 mr->alias_offset + MR_SIZE(mr->size),
3305 mr->enabled ? "" : " [disabled]");
3307 mtree_print_mr_owner(mr);
3312 if (mr->enabled || display_disabled) {
3313 for (i = 0; i < level; i++) {
3314 qemu_printf(MTREE_INDENT);
3316 qemu_printf(HWADDR_FMT_plx "-" HWADDR_FMT_plx
3317 " (prio %d, %s%s): %s%s",
3320 mr->nonvolatile ? "nv-" : "",
3321 memory_region_type((MemoryRegion *)mr),
3322 memory_region_name(mr),
3323 mr->enabled ? "" : " [disabled]");
3325 mtree_print_mr_owner(mr);
3331 QTAILQ_INIT(&submr_print_queue);
3333 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
3334 new_ml = g_new(MemoryRegionList, 1);
3336 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3337 if (new_ml->mr->addr < ml->mr->addr ||
3338 (new_ml->mr->addr == ml->mr->addr &&
3339 new_ml->mr->priority > ml->mr->priority)) {
3340 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
3346 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
3350 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3351 mtree_print_mr(ml->mr, level + 1, cur_start,
3352 alias_print_queue, owner, display_disabled);
3355 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3360 struct FlatViewInfo {
3367 static void mtree_print_flatview(gpointer key, gpointer value,
3370 FlatView *view = key;
3371 GArray *fv_address_spaces = value;
3372 struct FlatViewInfo *fvi = user_data;
3373 FlatRange *range = &view->ranges[0];
3379 qemu_printf("FlatView #%d\n", fvi->counter);
3382 for (i = 0; i < fv_address_spaces->len; ++i) {
3383 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3384 qemu_printf(" AS \"%s\", root: %s",
3385 as->name, memory_region_name(as->root));
3386 if (as->root->alias) {
3387 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3392 qemu_printf(" Root memory region: %s\n",
3393 view->root ? memory_region_name(view->root) : "(none)");
3396 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3402 if (range->offset_in_region) {
3403 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3404 " (prio %d, %s%s): %s @" HWADDR_FMT_plx,
3405 int128_get64(range->addr.start),
3406 int128_get64(range->addr.start)
3407 + MR_SIZE(range->addr.size),
3409 range->nonvolatile ? "nv-" : "",
3410 range->readonly ? "rom" : memory_region_type(mr),
3411 memory_region_name(mr),
3412 range->offset_in_region);
3414 qemu_printf(MTREE_INDENT HWADDR_FMT_plx "-" HWADDR_FMT_plx
3415 " (prio %d, %s%s): %s",
3416 int128_get64(range->addr.start),
3417 int128_get64(range->addr.start)
3418 + MR_SIZE(range->addr.size),
3420 range->nonvolatile ? "nv-" : "",
3421 range->readonly ? "rom" : memory_region_type(mr),
3422 memory_region_name(mr));
3425 mtree_print_mr_owner(mr);
3429 for (i = 0; i < fv_address_spaces->len; ++i) {
3430 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3431 if (fvi->ac->has_memory(current_machine, as,
3432 int128_get64(range->addr.start),
3433 MR_SIZE(range->addr.size) + 1)) {
3434 qemu_printf(" %s", fvi->ac->name);
3442 #if !defined(CONFIG_USER_ONLY)
3443 if (fvi->dispatch_tree && view->root) {
3444 mtree_print_dispatch(view->dispatch, view->root);
3451 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3454 FlatView *view = key;
3455 GArray *fv_address_spaces = value;
3457 g_array_unref(fv_address_spaces);
3458 flatview_unref(view);
3463 static void mtree_info_flatview(bool dispatch_tree, bool owner)
3465 struct FlatViewInfo fvi = {
3467 .dispatch_tree = dispatch_tree,
3472 GArray *fv_address_spaces;
3473 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3474 AccelClass *ac = ACCEL_GET_CLASS(current_accel());
3476 if (ac->has_memory) {
3480 /* Gather all FVs in one table */
3481 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3482 view = address_space_get_flatview(as);
3484 fv_address_spaces = g_hash_table_lookup(views, view);
3485 if (!fv_address_spaces) {
3486 fv_address_spaces = g_array_new(false, false, sizeof(as));
3487 g_hash_table_insert(views, view, fv_address_spaces);
3490 g_array_append_val(fv_address_spaces, as);
3494 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3497 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3498 g_hash_table_unref(views);
3501 struct AddressSpaceInfo {
3502 MemoryRegionListHead *ml_head;
3507 /* Returns negative value if a < b; zero if a = b; positive value if a > b. */
3508 static gint address_space_compare_name(gconstpointer a, gconstpointer b)
3510 const AddressSpace *as_a = a;
3511 const AddressSpace *as_b = b;
3513 return g_strcmp0(as_a->name, as_b->name);
3516 static void mtree_print_as_name(gpointer data, gpointer user_data)
3518 AddressSpace *as = data;
3520 qemu_printf("address-space: %s\n", as->name);
3523 static void mtree_print_as(gpointer key, gpointer value, gpointer user_data)
3525 MemoryRegion *mr = key;
3526 GSList *as_same_root_mr_list = value;
3527 struct AddressSpaceInfo *asi = user_data;
3529 g_slist_foreach(as_same_root_mr_list, mtree_print_as_name, NULL);
3530 mtree_print_mr(mr, 1, 0, asi->ml_head, asi->owner, asi->disabled);
3534 static gboolean mtree_info_as_free(gpointer key, gpointer value,
3537 GSList *as_same_root_mr_list = value;
3539 g_slist_free(as_same_root_mr_list);
3544 static void mtree_info_as(bool dispatch_tree, bool owner, bool disabled)
3546 MemoryRegionListHead ml_head;
3547 MemoryRegionList *ml, *ml2;
3549 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3550 GSList *as_same_root_mr_list;
3551 struct AddressSpaceInfo asi = {
3552 .ml_head = &ml_head,
3554 .disabled = disabled,
3557 QTAILQ_INIT(&ml_head);
3559 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3560 /* Create hashtable, key=AS root MR, value = list of AS */
3561 as_same_root_mr_list = g_hash_table_lookup(views, as->root);
3562 as_same_root_mr_list = g_slist_insert_sorted(as_same_root_mr_list, as,
3563 address_space_compare_name);
3564 g_hash_table_insert(views, as->root, as_same_root_mr_list);
3567 /* print address spaces */
3568 g_hash_table_foreach(views, mtree_print_as, &asi);
3569 g_hash_table_foreach_remove(views, mtree_info_as_free, 0);
3570 g_hash_table_unref(views);
3572 /* print aliased regions */
3573 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3574 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3575 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner, disabled);
3579 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3584 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled)
3587 mtree_info_flatview(dispatch_tree, owner);
3589 mtree_info_as(dispatch_tree, owner, disabled);
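/*
 * Usage note (not part of the original file): mtree_info() backs the HMP
 * "info mtree" command; the flatview, dispatch_tree, owner and disabled
 * arguments correspond to its -f, -d, -o and -D options, so e.g.
 * "info mtree -f -o" prints every rendered FlatView along with the owner
 * of each range.
 */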
3593 void memory_region_init_ram(MemoryRegion *mr,
3599 DeviceState *owner_dev;
3602 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3604 error_propagate(errp, err);
3607 /* This will assert if owner is neither NULL nor a DeviceState.
3608 * We only want the owner here for the purposes of defining a
3609 * unique name for migration. TODO: Ideally we should implement
3610 * a naming scheme for Objects which are not DeviceStates, in
3611 * which case we can relax this restriction.
3613 owner_dev = DEVICE(owner);
3614 vmstate_register_ram(mr, owner_dev);
3617 void memory_region_init_rom(MemoryRegion *mr,
3623 DeviceState *owner_dev;
3626 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3628 error_propagate(errp, err);
3631 /* This will assert if owner is neither NULL nor a DeviceState.
3632 * We only want the owner here for the purposes of defining a
3633 * unique name for migration. TODO: Ideally we should implement
3634 * a naming scheme for Objects which are not DeviceStates, in
3635 * which case we can relax this restriction.
3637 owner_dev = DEVICE(owner);
3638 vmstate_register_ram(mr, owner_dev);
3641 void memory_region_init_rom_device(MemoryRegion *mr,
3643 const MemoryRegionOps *ops,
3649 DeviceState *owner_dev;
3652 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3655 error_propagate(errp, err);
3658 /* This will assert if owner is neither NULL nor a DeviceState.
3659 * We only want the owner here for the purposes of defining a
3660 * unique name for migration. TODO: Ideally we should implement
3661 * a naming scheme for Objects which are not DeviceStates, in
3662 * which case we can relax this restriction.
3664 owner_dev = DEVICE(owner);
3665 vmstate_register_ram(mr, owner_dev);
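/*
 * Illustrative sketch (not part of the original file): the usual device-side
 * call, passing the DeviceState as owner so the RAM gets a stable name for
 * migration.  The device state, base address and size are hypothetical.
 *
 *     memory_region_init_ram(&s->sram, OBJECT(dev), "board.sram",
 *                            0x10000, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), SRAM_BASE, &s->sram);
 */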
3669 * Support system builds with CONFIG_FUZZ using a weak symbol and a stub for
3670 * the fuzz_dma_read_cb callback
3673 void __attribute__((weak)) fuzz_dma_read_cb(size_t addr,
3680 static const TypeInfo memory_region_info = {
3681 .parent = TYPE_OBJECT,
3682 .name = TYPE_MEMORY_REGION,
3683 .class_size = sizeof(MemoryRegionClass),
3684 .instance_size = sizeof(MemoryRegion),
3685 .instance_init = memory_region_initfn,
3686 .instance_finalize = memory_region_finalize,
3689 static const TypeInfo iommu_memory_region_info = {
3690 .parent = TYPE_MEMORY_REGION,
3691 .name = TYPE_IOMMU_MEMORY_REGION,
3692 .class_size = sizeof(IOMMUMemoryRegionClass),
3693 .instance_size = sizeof(IOMMUMemoryRegion),
3694 .instance_init = iommu_memory_region_initfn,
3698 static const TypeInfo ram_discard_manager_info = {
3699 .parent = TYPE_INTERFACE,
3700 .name = TYPE_RAM_DISCARD_MANAGER,
3701 .class_size = sizeof(RamDiscardManagerClass),
3704 static void memory_register_types(void)
3706 type_register_static(&memory_region_info);
3707 type_register_static(&iommu_memory_region_info);
3708 type_register_static(&ram_discard_manager_info);
3711 type_init(memory_register_types)