/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

extern bool global_dirty_log;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    hwaddr low;
    hwaddr high;
    unsigned type;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
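
/*
 * Example (illustrative sketch, not part of the API): registering a
 * notifier for MAP/UNMAP events over a whole IOMMU memory region.
 * The callback name "my_iommu_map_notify" is hypothetical; error
 * handling is reduced to a bare return.
 *
 *   IOMMUNotifier n;
 *
 *   iommu_notifier_init(&n, my_iommu_map_notify,
 *                       IOMMU_NOTIFIER_IOTLB_EVENTS,
 *                       0, HWADDR_MAX, 0);
 *   if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                             &n, errp)) {
 *       return;
 *   }
 */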

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
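
/*
 * Example (illustrative sketch): a minimal MemoryRegionOps for a device
 * with 4-byte registers. "MyDevState" and its "status"/"ctrl" fields
 * are hypothetical; offset 0x0 reads a status register, offset 0x4
 * writes a control register.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *
 *       return addr == 0x0 ? s->status : 0;
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                           unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *
 *       if (addr == 0x4) {
 *           s->ctrl = data;
 *       }
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid = {
 *           .min_access_size = 4,
 *           .max_access_size = 4,
 *       },
 *   };
 */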

typedef struct MemoryRegionClass {
    /* private: */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @hwaddr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);

    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);

    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);

    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);

    /**
     * @iommu_set_page_size_mask:
     *
     * Restrict the page size mask that can be supported with a given IOMMU
     * memory region. Used for example to propagate host physical IOMMU page
     * size mask limitations to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default global
     * page mask is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
     * representing the smallest page size, must be set. Additional set bits
     * represent supported block sizes. For example a host physical IOMMU that
     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
     * block sizes is specified with mask 0xfffffffffffff000.
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
                                    uint64_t page_size_mask,
                                    Error **errp);
};
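
/*
 * Example (illustrative sketch): the shape of a trivial @translate
 * implementation for a hypothetical IOMMU that identity-maps everything
 * at 4k granularity into the system address space. A real IOMMU would
 * walk its translation tables here and honour @flag and @iommu_idx.
 *
 *   static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                           hwaddr addr,
 *                                           IOMMUAccessFlags flag,
 *                                           int iommu_idx)
 *   {
 *       IOMMUTLBEntry entry = {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = addr & ~(hwaddr)0xfff,
 *           .addr_mask = 0xfff,
 *           .perm = IOMMU_RW,
 *       };
 *       return entry;
 *   }
 */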

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the
     * address space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     * the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     * the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL.  Vice versa.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_sync_global)(MemoryListener *listener);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_start)(MemoryListener *listener);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @priority:
     *
     * Govern the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
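
/*
 * Example (illustrative sketch): a listener that logs sections as they
 * are added to the system address space. memory_listener_register() is
 * declared later in this header; the callback name and priority value
 * are hypothetical.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       qemu_log("region added: %s\n", memory_region_name(section->mr));
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */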

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
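
/*
 * Example (illustrative sketch): counting the ranges in the flat view
 * of the system address space. The callback returns false so that the
 * iteration visits every range; address_space_memory is assumed to be
 * in scope (see "exec/address-spaces.h").
 *
 *   static bool count_range(Int128 start, Int128 len,
 *                           const MemoryRegion *mr,
 *                           hwaddr offset_in_region, void *opaque)
 *   {
 *       unsigned *count = opaque;
 *
 *       (*count)++;
 *       return false;
 *   }
 *
 *   unsigned count = 0;
 *   flatview_for_each_range(address_space_to_flatview(&address_space_memory),
 *                           count_range, &count);
 */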

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
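
/*
 * Example (illustrative sketch): creating a 4 KiB MMIO region for the
 * hypothetical mydev_ops shown earlier and mapping it into the system
 * memory map at a made-up base address. "s" is assumed to be a device
 * state structure with an embedded MemoryRegion "iomem".
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 *   memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                               &s->iomem);
 */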

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     Changing the size while migrating
 *                                     can result in the migration being
 *                                     canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: Memory region features:
 *             - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
 *             - RAM_PMEM: the memory is persistent memory
 *             Other bits are ignored now.
 * @path: the path in which to allocate the RAM.
 * @readonly: true to open @path for reading, false for read/write.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      bool readonly,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
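
/*
 * Example (illustrative sketch): mirroring the first megabyte of a
 * hypothetical "ram" region at a second guest-physical address, the
 * usual way shadowed or remapped windows are modelled. The field names
 * and addresses are made up.
 *
 *   memory_region_init_alias(&s->ram_lo, OBJECT(s), "ram-below-1m",
 *                            &s->ram, 0, 0x100000);
 *   memory_region_add_subregion(get_system_memory(), 0xe0000000,
 *                               &s->ram_lo);
 */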

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
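
/*
 * Example (illustrative sketch): allocating migratable board RAM and
 * mapping it at guest-physical address 0. The size and region name are
 * made up; &error_fatal aborts QEMU on allocation failure, which is the
 * common choice at board-init time. MiB comes from "qemu/units.h".
 *
 *   MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *   memory_region_init_ram(ram, NULL, "board.ram", 256 * MiB,
 *                          &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, ram);
 */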

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().  This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *   for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEvent event);

/**
 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
 *                                 entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @event: TLB event with the new entry in the IOMMU translation table.
 *         The entry replaces all old entries for the same virtual I/O address
 *         range.
 */
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
                                    IOMMUTLBEvent *event);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region. In case of error
 * the error object must be created.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_page_size().
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_iommu_set_page_size_mask: set the supported page
 * sizes for a given IOMMU memory region
 *
 * @iommu_mr: IOMMU memory region
 * @page_size_mask: supported page size mask
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
                                           uint64_t page_size_mask,
                                           Error **errp);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Resizing RAM while migrating can result in the migration being canceled.
 * Care has to be taken if the guest might have already detected the memory.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_msync: Synchronize selected address range of
 * a memory mapped region
 *
 * @mr: the memory region to be synchronized
 * @addr: the initial address of the range to be synchronized
 * @size: the size of the range to be synchronized
 */
void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_writeback: Trigger cache writeback for
 * selected address range
 *
 * @mr: the memory region to be updated
 * @addr: the initial address of the range to be written back
 * @size: the size of the range to be written back
 */
void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host
 * kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64bit hosts) can be copied over into the bitmap snapshot.  Which
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * point.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
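
/*
 * Example (illustrative sketch): a display-update style loop that
 * snapshots the VGA dirty log for a framebuffer region and then queries
 * individual scanlines. "fb_mr", "stride", "num_rows" and "redraw_row"
 * are hypothetical.
 *
 *   DirtyBitmapSnapshot *snap;
 *   int row;
 *
 *   snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0,
 *                                                 stride * num_rows,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (row = 0; row < num_rows; row++) {
 *       if (memory_region_snapshot_get_dirty(fb_mr, snap,
 *                                            row * stride, stride)) {
 *           redraw_row(row);
 *       }
 *   }
 *   g_free(snap);
 */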

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
 * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * types.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);
1838 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before accesses.
1841 * Clear the automatic coalesced MMIO flushing enabled via
1842 * memory_region_set_flush_coalesced(). Note that this service has no effect on
1843 * memory regions that have MMIO coalescing enabled for themselves. For them,
1844 * automatic flushing will stop once coalescing is disabled.
1846 * @mr: the memory region to be updated.
1848 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1851 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1852 * is written to a location.
1854 * Marks a word in an IO region (initialized with memory_region_init_io())
1855 * as a trigger for an eventfd event. The I/O callback will not be called.
1856 * The caller must be prepared to handle failure (that is, take the required
1857 * action if the callback _is_ called).
1859 * @mr: the memory region being updated.
1860 * @addr: the address within @mr that is to be monitored
1861 * @size: the size of the access to trigger the eventfd
1862 * @match_data: whether to match against @data, instead of just @addr
1863 * @data: the data to match against the guest write
1864 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1866 void memory_region_add_eventfd(MemoryRegion *mr,
1867                                hwaddr addr,
1868                                unsigned size,
1869                                bool match_data,
1870                                uint64_t data,
1871                                EventNotifier *e);
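/*
 * Example (illustrative sketch): wire a 4-byte "doorbell" register of a
 * hypothetical device to an EventNotifier, so that a guest writing the
 * value 1 to offset 0x40 of the region signals the notifier instead of
 * invoking the region's write callback. "s->iomem" and "s->doorbell" are
 * assumed fields of a made-up device state, not part of this API.
 *
 *   event_notifier_init(&s->doorbell, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x40, 4, true, 1, &s->doorbell);
 */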
1874 * memory_region_del_eventfd: Cancel an eventfd.
1876 * Cancels an eventfd trigger requested by a previous
1877 * memory_region_add_eventfd() call.
1879 * @mr: the memory region being updated.
1880 * @addr: the address within @mr that is to be monitored
1881 * @size: the size of the access to trigger the eventfd
1882 * @match_data: whether to match against @data, instead of just @addr
1883 * @data: the data to match against the guest write
1884 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1886 void memory_region_del_eventfd(MemoryRegion *mr,
1887                                hwaddr addr,
1888                                unsigned size,
1889                                bool match_data,
1890                                uint64_t data,
1891                                EventNotifier *e);
1894 * memory_region_add_subregion: Add a subregion to a container.
1896 * Adds a subregion at @offset. The subregion may not overlap with other
1897 * subregions (except for those explicitly marked as overlapping). A region
1898 * may only be added once as a subregion (unless removed with
1899 * memory_region_del_subregion()); use memory_region_init_alias() if you
1900 * want a region to be a subregion in multiple locations.
1902 * @mr: the region to contain the new subregion; must be a container
1903 * initialized with memory_region_init().
1904 * @offset: the offset relative to @mr where @subregion is added.
1905 * @subregion: the subregion to be added.
1907 void memory_region_add_subregion(MemoryRegion *mr,
1908                                  hwaddr offset,
1909                                  MemoryRegion *subregion);
1911 * memory_region_add_subregion_overlap: Add a subregion to a container with overlap.
1914 * Adds a subregion at @offset. The subregion may overlap with other
1915 * subregions. Conflicts are resolved by having a higher @priority hide a
1916 * lower @priority. Subregions without priority are taken as @priority 0.
1917 * A region may only be added once as a subregion (unless removed with
1918 * memory_region_del_subregion()); use memory_region_init_alias() if you
1919 * want a region to be a subregion in multiple locations.
1921 * @mr: the region to contain the new subregion; must be a container
1922 * initialized with memory_region_init().
1923 * @offset: the offset relative to @mr where @subregion is added.
1924 * @subregion: the subregion to be added.
1925 * @priority: used for resolving overlaps; highest priority wins.
1927 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1928                                          hwaddr offset,
1929                                          MemoryRegion *subregion,
1930                                          int priority);
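/*
 * Example (illustrative sketch): a container in which a made-up 4 KiB MMIO
 * region at offset 0x1000 hides the RAM underneath it. Region names and
 * sizes are invented for illustration; the MMIO region wins because it is
 * added with priority 1 over the implicit priority 0 of the RAM.
 *
 *   memory_region_init(&container, owner, "container", 0x10000);
 *   memory_region_add_subregion(&container, 0, &ram);
 *   memory_region_add_subregion_overlap(&container, 0x1000, &mmio, 1);
 */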
1933 * memory_region_get_ram_addr: Get the ram address associated with a memory region.
1936 * @mr: the region to be queried
1938 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1940 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1942 * memory_region_del_subregion: Remove a subregion.
1944 * Removes a subregion from its container.
1946 * @mr: the container to be updated.
1947 * @subregion: the region being removed; must be a current subregion of @mr.
1949 void memory_region_del_subregion(MemoryRegion *mr,
1950 MemoryRegion *subregion);
1953 * memory_region_set_enabled: dynamically enable or disable a region
1955 * Enables or disables a memory region. A disabled memory region
1956 * ignores all accesses to itself and its subregions. It does not
1957 * obscure sibling subregions with lower priority - it simply behaves as
1958 * if it was removed from the hierarchy.
1960 * Regions default to being enabled.
1962 * @mr: the region to be updated
1963 * @enabled: whether to enable or disable the region
1965 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1968 * memory_region_set_address: dynamically update the address of a region
1970 * Dynamically updates the address of a region, relative to its container.
1971 * May be used on regions that are currently part of a memory hierarchy.
1973 * @mr: the region to be updated
1974 * @addr: new address, relative to container region
1976 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1979 * memory_region_set_size: dynamically update the size of a region.
1981 * Dynamically updates the size of a region.
1983 * @mr: the region to be updated
1984 * @size: used size of the region.
1986 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1989 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1991 * Dynamically updates the offset into the target region that an alias points
1992 * to, as if the fourth argument to memory_region_init_alias() has changed.
1994 * @mr: the #MemoryRegion to be updated; should be an alias.
1995 * @offset: the new offset into the target memory region
1997 void memory_region_set_alias_offset(MemoryRegion *mr,
1998                                     hwaddr offset);
2001 * memory_region_present: checks if an address relative to a @container
2002 * translates into #MemoryRegion within @container
2004 * Answer whether a #MemoryRegion within @container covers the address @addr.
2007 * @container: a #MemoryRegion within which @addr is a relative address
2008 * @addr: the area within @container to be searched
2010 bool memory_region_present(MemoryRegion *container, hwaddr addr);
2013 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2014 * into any address space.
2016 * @mr: a #MemoryRegion which should be checked if it's mapped
2018 bool memory_region_is_mapped(MemoryRegion *mr);
2021 * memory_region_find: translate an address/size relative to a
2022 * MemoryRegion into a #MemoryRegionSection.
2024 * Locates the first #MemoryRegion within @mr that overlaps the range
2025 * given by @addr and @size.
2027 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2028 * It will have the following characteristics:
2029 * - @size = 0 iff no overlap was found
2030 * - @mr is non-%NULL iff an overlap was found
2032 * Remember that in the return value the @offset_within_region is
2033 * relative to the returned region (in the .@mr field), not to the current @mr.
2036 * Similarly, the .@offset_within_address_space is relative to the
2037 * address space that contains both regions, the passed and the
2038 * returned one. However, in the special case where the @mr argument
2039 * has no container (and thus is the root of the address space), the
2040 * following will hold:
2041 * - @offset_within_address_space >= @addr
2042 * - @offset_within_address_space + .@size <= @addr + @size
2044 * @mr: a MemoryRegion within which @addr is a relative address
2045 * @addr: start of the area within @mr to be searched
2046 * @size: size of the area to be searched
2048 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2049 hwaddr addr, uint64_t size);
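/*
 * Example (illustrative sketch): probe the first page of a container and
 * drop the reference that memory_region_find() takes on the result.
 *
 *   MemoryRegionSection section = memory_region_find(container, 0, 4096);
 *   if (section.mr) {
 *       ... use section.offset_within_region / int128_get64(section.size) ...
 *       memory_region_unref(section.mr);
 *   }
 */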
2052 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2054 * Synchronizes the dirty page log for all address spaces.
2056 void memory_global_dirty_log_sync(void);
2059 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
2061 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2062 * This function must be called after the dirty log bitmap is cleared, and
2063 * before dirty guest memory pages are read. If you are using
2064 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2065 * care of doing this.
2067 void memory_global_after_dirty_log_sync(void);
2070 * memory_region_transaction_begin: Start a transaction.
2072 * During a transaction, changes will be accumulated and made visible
2073 * only when the transaction ends (is committed).
2075 void memory_region_transaction_begin(void);
2078 * memory_region_transaction_commit: Commit a transaction and make changes
2079 * visible to the guest.
2081 void memory_region_transaction_commit(void);
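/*
 * Example (illustrative sketch): batch two updates to hypothetical regions
 * so listeners and the guest observe one atomic topology change rather than
 * two intermediate states.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(bank0, false);
 *   memory_region_set_address(bank1, 0x40000000);
 *   memory_region_transaction_commit();
 */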
2084 * memory_listener_register: register callbacks to be called when memory
2085 * sections are mapped or unmapped into an address space
2088 * @listener: an object containing the callbacks to be called
2089 * @filter: if non-%NULL, only regions in this address space will be observed
2091 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
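/*
 * Example (illustrative sketch): observe mappings in the system memory
 * address space. my_region_add is a made-up callback; real listeners
 * typically also implement region_del, commit, and related hooks.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       ... react to the newly mapped section ...
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */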
2094 * memory_listener_unregister: undo the effect of memory_listener_register()
2096 * @listener: an object containing the callbacks to be removed
2098 void memory_listener_unregister(MemoryListener *listener);
2101 * memory_global_dirty_log_start: begin dirty logging for all regions
2103 void memory_global_dirty_log_start(void);
2106 * memory_global_dirty_log_stop: end dirty logging for all regions
2108 void memory_global_dirty_log_stop(void);
2110 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2113 * memory_region_dispatch_read: perform a read directly to the specified #MemoryRegion.
2116 * @mr: #MemoryRegion to access
2117 * @addr: address within that region
2118 * @pval: pointer to uint64_t which the data is written to
2119 * @op: size, sign, and endianness of the memory operation
2120 * @attrs: memory transaction attributes to use for the access
2122 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2123                                         hwaddr addr,
2124                                         uint64_t *pval,
2125                                         MemOp op,
2126                                         MemTxAttrs attrs);
2128 * memory_region_dispatch_write: perform a write directly to the specified #MemoryRegion.
2131 * @mr: #MemoryRegion to access
2132 * @addr: address within that region
2133 * @data: data to write
2134 * @op: size, sign, and endianness of the memory operation
2135 * @attrs: memory transaction attributes to use for the access
2137 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2138                                          hwaddr addr,
2139                                          uint64_t data,
2140                                          MemOp op,
2141                                          MemTxAttrs attrs);
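/*
 * Example (illustrative sketch): read a 32-bit little-endian value straight
 * from a region's callbacks, bypassing address-space dispatch; the region
 * pointer and offset are hypothetical.
 *
 *   uint64_t val;
 *   MemTxResult r = memory_region_dispatch_read(mr, 0x10, &val,
 *                                               MO_32 | MO_LE,
 *                                               MEMTXATTRS_UNSPECIFIED);
 */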
2144 * address_space_init: initializes an address space
2146 * @as: an uninitialized #AddressSpace
2147 * @root: a #MemoryRegion that routes addresses for the address space
2148 * @name: an address space name. The name is only used for debugging purposes.
2151 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
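/*
 * Example (illustrative sketch): give a hypothetical DMA-capable device its
 * own address space rooted at the system memory region; "s->dma_as" is an
 * assumed device-state field.
 *
 *   address_space_init(&s->dma_as, get_system_memory(), "mydev-dma");
 *   ...
 *   address_space_destroy(&s->dma_as);
 */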
2154 * address_space_destroy: destroy an address space
2156 * Releases all resources associated with an address space. After an address space
2157 * is destroyed, its root memory region (given by address_space_init()) may be destroyed as well.
2160 * @as: address space to be destroyed
2162 void address_space_destroy(AddressSpace *as);
2165 * address_space_remove_listeners: unregister all listeners of an address space
2167 * Removes all callbacks previously registered with memory_listener_register() for @as.
2170 * @as: an initialized #AddressSpace
2172 void address_space_remove_listeners(AddressSpace *as);
2175 * address_space_rw: read from or write to an address space.
2177 * Return a MemTxResult indicating whether the operation succeeded
2178 * or failed (e.g. unassigned memory, device rejected the transaction, IOMMU fault).
2181 * @as: #AddressSpace to be accessed
2182 * @addr: address within that address space
2183 * @attrs: memory transaction attributes
2184 * @buf: buffer with the data transferred
2185 * @len: the number of bytes to read or write
2186 * @is_write: indicates the transfer direction
2188 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2189 MemTxAttrs attrs, void *buf,
2190 hwaddr len, bool is_write);
2193 * address_space_write: write to address space.
2195 * Return a MemTxResult indicating whether the operation succeeded
2196 * or failed (e.g. unassigned memory, device rejected the transaction, IOMMU fault).
2199 * @as: #AddressSpace to be accessed
2200 * @addr: address within that address space
2201 * @attrs: memory transaction attributes
2202 * @buf: buffer with the data transferred
2203 * @len: the number of bytes to write
2205 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2206                                 MemTxAttrs attrs,
2207                                 const void *buf, hwaddr len);
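/*
 * Example (illustrative sketch): write a descriptor to a hypothetical guest
 * physical address DESC_ADDR and check the result.
 *
 *   MemTxResult r = address_space_write(as, DESC_ADDR,
 *                                       MEMTXATTRS_UNSPECIFIED,
 *                                       &desc, sizeof(desc));
 *   if (r != MEMTX_OK) {
 *       ... flag a DMA error on the device ...
 *   }
 */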
2210 * address_space_write_rom: write to address space, including ROM.
2212 * This function writes to the specified address space, but will
2213 * write data to both ROM and RAM. This is used for non-guest
2214 * writes like writes from the gdb debug stub or initial loading of ROM contents.
2217 * Note that portions of the write which attempt to write data to
2218 * a device will be silently ignored -- only real RAM and ROM will be written.
2221 * Return a MemTxResult indicating whether the operation succeeded
2222 * or failed (e.g. unassigned memory, device rejected the transaction, IOMMU fault).
2225 * @as: #AddressSpace to be accessed
2226 * @addr: address within that address space
2227 * @attrs: memory transaction attributes
2228 * @buf: buffer with the data transferred
2229 * @len: the number of bytes to write
2231 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2232                                     MemTxAttrs attrs,
2233                                     const void *buf, hwaddr len);
2235 /* address_space_ld*: load from an address space
2236 * address_space_st*: store to an address space
2238 * These functions perform a load or store of the byte, word,
2239 * longword or quad to the specified address within the AddressSpace.
2240 * The _le suffixed functions treat the data as little endian;
2241 * _be indicates big endian; no suffix indicates "same endianness as the guest CPU".
2244 * The "guest CPU endianness" accessors are deprecated for use outside
2245 * target-* code; devices should be CPU-agnostic and use either the LE
2246 * or the BE accessors.
2248 * @as: #AddressSpace to be accessed
2249 * @addr: address within that address space
2250 * @val: data value, for stores
2251 * @attrs: memory transaction attributes
2252 * @result: location to write the success/failure of the transaction;
2253 * if NULL, this information is discarded
2256 #define SUFFIX
2257 #define ARG1         as
2258 #define ARG1_DECL    AddressSpace *as
2259 #include "exec/memory_ldst.h.inc"
2261 #define SUFFIX
2262 #define ARG1         as
2263 #define ARG1_DECL    AddressSpace *as
2264 #include "exec/memory_ldst_phys.h.inc"
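/*
 * Example (illustrative sketch): the generated accessors follow this
 * pattern; ADDR is a hypothetical guest physical address.
 *
 *   MemTxResult res;
 *   uint32_t v = address_space_ldl_le(as, ADDR, MEMTXATTRS_UNSPECIFIED, &res);
 *   address_space_stl_le(as, ADDR, v + 1, MEMTXATTRS_UNSPECIFIED, &res);
 */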
2266 struct MemoryRegionCache {
2267     void *ptr;
2268     hwaddr xlat;
2269     hwaddr len;
2270     FlatView *fv;
2271     MemoryRegionSection mrs;
2272     bool is_write;
2273 };
2275 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
2278 /* address_space_ld*_cached: load from a cached #MemoryRegion
2279 * address_space_st*_cached: store into a cached #MemoryRegion
2281 * These functions perform a load or store of the byte, word,
2282 * longword or quad to the specified address. The address is
2283 * a physical address in the AddressSpace, but it must lie within
2284 * a #MemoryRegion that was mapped with address_space_cache_init.
2286 * The _le suffixed functions treat the data as little endian;
2287 * _be indicates big endian; no suffix indicates "same endianness
2290 * The "guest CPU endianness" accessors are deprecated for use outside
2291 * target-* code; devices should be CPU-agnostic and use either the LE
2292 * or the BE accessors.
2294 * @cache: previously initialized #MemoryRegionCache to be accessed
2295 * @addr: address within the address space
2296 * @val: data value, for stores
2297 * @attrs: memory transaction attributes
2298 * @result: location to write the success/failure of the transaction;
2299 * if NULL, this information is discarded
2302 #define SUFFIX       _cached_slow
2303 #define ARG1         cache
2304 #define ARG1_DECL    MemoryRegionCache *cache
2305 #include "exec/memory_ldst.h.inc"
2307 /* Inline fast path for direct RAM access. */
2308 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2309     hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2310 {
2311     assert(addr < cache->len);
2312     if (likely(cache->ptr)) {
2313         return ldub_p(cache->ptr + addr);
2314     } else {
2315         return address_space_ldub_cached_slow(cache, addr, attrs, result);
2316     }
2317 }
2319 static inline void address_space_stb_cached(MemoryRegionCache *cache,
2320     hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
2321 {
2322     assert(addr < cache->len);
2323     if (likely(cache->ptr)) {
2324         stb_p(cache->ptr + addr, val);
2325     } else {
2326         address_space_stb_cached_slow(cache, addr, val, attrs, result);
2327     }
2328 }
2330 #define ENDIANNESS _le
2331 #include "exec/memory_ldst_cached.h.inc"
2333 #define ENDIANNESS _be
2334 #include "exec/memory_ldst_cached.h.inc"
2336 #define SUFFIX       _cached
2337 #define ARG1         cache
2338 #define ARG1_DECL    MemoryRegionCache *cache
2339 #include "exec/memory_ldst_phys.h.inc"
2341 /* address_space_cache_init: prepare for repeated access to a physical memory region
2344 * @cache: #MemoryRegionCache to be filled
2345 * @as: #AddressSpace to be accessed
2346 * @addr: address within that address space
2347 * @len: length of buffer
2348 * @is_write: indicates the transfer direction
2350 * Will only work with RAM, and may map a subset of the requested range by
2351 * returning a value that is less than @len. On failure, return a negative error code.
2354 * Because it only works with RAM, this function can be used for
2355 * read-modify-write operations. In this case, is_write should be %true.
2357 * Note that addresses passed to the address_space_*_cached functions
2358 * are relative to @addr.
2360 int64_t address_space_cache_init(MemoryRegionCache *cache,
2361                                  AddressSpace *as,
2362                                  hwaddr addr,
2363                                  hwaddr len,
2364                                  bool is_write);
2367 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2369 * @cache: The #MemoryRegionCache to operate on.
2370 * @addr: The first physical address that was written, relative to the
2371 * address that was passed to @address_space_cache_init.
2372 * @access_len: The number of bytes that were written starting at @addr.
2374 void address_space_cache_invalidate(MemoryRegionCache *cache,
2375                                     hwaddr addr,
2376                                     hwaddr access_len);
2379 * address_space_cache_destroy: free a #MemoryRegionCache
2381 * @cache: The #MemoryRegionCache whose memory should be released.
2383 void address_space_cache_destroy(MemoryRegionCache *cache);
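/*
 * Example (illustrative sketch): typical cache lifecycle for repeated
 * accesses to a hypothetical ring buffer at RING_ADDR; error handling is
 * elided.
 *
 *   MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *   int64_t len = address_space_cache_init(&cache, as, RING_ADDR,
 *                                          RING_LEN, true);
 *   ... check that len covers the fields that will be accessed ...
 *   uint16_t idx = address_space_lduw_le_cached(&cache, 0,
 *                                               MEMTXATTRS_UNSPECIFIED, NULL);
 *   address_space_stw_le_cached(&cache, 0, idx + 1,
 *                               MEMTXATTRS_UNSPECIFIED, NULL);
 *   address_space_cache_invalidate(&cache, 0, sizeof(idx));
 *   address_space_cache_destroy(&cache);
 */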
2385 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2386 * entry. Should be called from an RCU critical section.
2388 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2389 bool is_write, MemTxAttrs attrs);
2391 /* address_space_translate: translate an address range within an address space
2392  * into a MemoryRegion and an address range within that region. Should be
2393 * called from an RCU critical section, to avoid that the last reference
2394 * to the returned region disappears after address_space_translate returns.
2396 * @fv: #FlatView to be accessed
2397 * @addr: address within that address space
2398 * @xlat: pointer to address within the returned memory region section's #MemoryRegion.
2400 * @len: pointer to length
2401 * @is_write: indicates the transfer direction
2402 * @attrs: memory attributes
2404 MemoryRegion *flatview_translate(FlatView *fv,
2405 hwaddr addr, hwaddr *xlat,
2406                                   hwaddr *len, bool is_write,
2407                                   MemTxAttrs attrs);
2409 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2410                                                     hwaddr addr, hwaddr *xlat,
2411                                                     hwaddr *len, bool is_write,
2412                                                     MemTxAttrs attrs)
2413 {
2414     return flatview_translate(address_space_to_flatview(as),
2415                               addr, xlat, len, is_write, attrs);
2416 }
2418 /* address_space_access_valid: check for validity of accessing an address space range
2421 * Check whether memory is assigned to the given address space range, and
2422 * access is permitted by any IOMMU regions that are active for the address
2425 * For now, addr and len should be aligned to a page size. This limitation
2426 * will be lifted in the future.
2428 * @as: #AddressSpace to be accessed
2429 * @addr: address within that address space
2430 * @len: length of the area to be checked
2431 * @is_write: indicates the transfer direction
2432 * @attrs: memory attributes
2434 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2435 bool is_write, MemTxAttrs attrs);
2437 /* address_space_map: map a physical memory region into a host virtual address
2439 * May map a subset of the requested range, given by and returned in @plen.
2440 * May return %NULL and set *@plen to zero, if resources needed to perform
2441 * the mapping are exhausted.
2442 * Use only for reads OR writes - not for read-modify-write operations.
2443 * Use cpu_register_map_client() to know when retrying the map operation is
2444 * likely to succeed.
2446 * @as: #AddressSpace to be accessed
2447 * @addr: address within that address space
2448 * @plen: pointer to length of buffer; updated on return
2449 * @is_write: indicates the transfer direction
2450 * @attrs: memory attributes
2452 void *address_space_map(AddressSpace *as, hwaddr addr,
2453 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2455 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2457 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2458 * the amount of memory that was actually read or written by the caller.
2460 * @as: #AddressSpace used
2461 * @buffer: host pointer as returned by address_space_map()
2462 * @len: buffer length as returned by address_space_map()
2463 * @access_len: amount of data actually transferred
2464 * @is_write: indicates the transfer direction
2466 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2467 bool is_write, hwaddr access_len);
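/*
 * Example (illustrative sketch): map a guest buffer for reading, consume
 * it, then unmap; BUF_ADDR and BUF_LEN are hypothetical.
 *
 *   hwaddr plen = BUF_LEN;
 *   void *p = address_space_map(as, BUF_ADDR, &plen, false,
 *                               MEMTXATTRS_UNSPECIFIED);
 *   if (p) {
 *       ... consume up to plen bytes at p ...
 *       address_space_unmap(as, p, plen, false, plen);
 *   }
 */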
2470 /* Internal functions, part of the implementation of address_space_read. */
2471 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2472 MemTxAttrs attrs, void *buf, hwaddr len);
2473 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2474 MemTxAttrs attrs, void *buf,
2475                                    hwaddr len, hwaddr addr1, hwaddr l,
2476                                    MemoryRegion *mr);
2477 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2479 /* Internal functions, part of the implementation of address_space_read_cached
2480 * and address_space_write_cached. */
2481 MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
2482 hwaddr addr, void *buf, hwaddr len);
2483 MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
2484                                             hwaddr addr, const void *buf,
2485                                             hwaddr len);
2487 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2488 {
2489     if (is_write) {
2490         return memory_region_is_ram(mr) && !mr->readonly &&
2491                !mr->rom_device && !memory_region_is_ram_device(mr);
2492     } else {
2493         return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2494                memory_region_is_romd(mr);
2495     }
2496 }
2499 * address_space_read: read from an address space.
2501 * Return a MemTxResult indicating whether the operation succeeded
2502 * or failed (eg unassigned memory, device rejected the transaction,
2503 * IOMMU fault). Called within RCU critical section.
2505 * @as: #AddressSpace to be accessed
2506 * @addr: address within that address space
2507 * @attrs: memory transaction attributes
2508 * @buf: buffer with the data transferred
2509 * @len: length of the data transferred
2511 static inline __attribute__((__always_inline__))
2512 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2513                                MemTxAttrs attrs, void *buf,
2514                                hwaddr len)
2515 {
2516     MemTxResult result = MEMTX_OK;
2517     hwaddr l, addr1;
2518     void *ptr;
2519     MemoryRegion *mr;
2520     FlatView *fv;
2522     if (__builtin_constant_p(len)) {
2523         if (len) {
2524             RCU_READ_LOCK_GUARD();
2525             fv = address_space_to_flatview(as);
2526             l = len;
2527             mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2528             if (len == l && memory_access_is_direct(mr, false)) {
2529                 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2530                 memcpy(buf, ptr, len);
2531             } else {
2532                 result = flatview_read_continue(fv, addr, attrs, buf, len,
2533                                                 addr1, l, mr);
2534             }
2535         }
2536     } else {
2537         result = address_space_read_full(as, addr, attrs, buf, len);
2538     }
2539     return result;
2540 }
2543 * address_space_read_cached: read from a cached RAM region
2545 * @cache: Cached region to be addressed
2546 * @addr: address relative to the base of the RAM region
2547 * @buf: buffer with the data transferred
2548 * @len: length of the data transferred
2550 static inline MemTxResult
2551 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2552                           void *buf, hwaddr len)
2553 {
2554     assert(addr < cache->len && len <= cache->len - addr);
2555     fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
2556     if (likely(cache->ptr)) {
2557         memcpy(buf, cache->ptr + addr, len);
2558         return MEMTX_OK;
2559     } else {
2560         return address_space_read_cached_slow(cache, addr, buf, len);
2561     }
2562 }
2565 * address_space_write_cached: write to a cached RAM region
2567 * @cache: Cached region to be addressed
2568 * @addr: address relative to the base of the RAM region
2569 * @buf: buffer with the data transferred
2570 * @len: length of the data transferred
2572 static inline MemTxResult
2573 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2574                            const void *buf, hwaddr len)
2575 {
2576     assert(addr < cache->len && len <= cache->len - addr);
2577     if (likely(cache->ptr)) {
2578         memcpy(cache->ptr + addr, buf, len);
2579         return MEMTX_OK;
2580     } else {
2581         return address_space_write_cached_slow(cache, addr, buf, len);
2582     }
2583 }
2586 /* enum device_endian to MemOp. */
2587 static inline MemOp devend_memop(enum device_endian end)
2588 {
2589     QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
2590                       DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
2592 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
2593     /* Swap if non-host endianness or native (target) endianness */
2594     return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
2595 #else
2596     const int non_host_endianness =
2597         DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
2599     /* In this case, native (target) endianness needs no swap. */
2600     return (end == non_host_endianness) ? MO_BSWAP : 0;
2601 #endif
2602 }
2606 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
2607 * to manage the actual amount of memory consumed by the VM (then, the memory
2608 * provided by RAM blocks might be bigger than the desired memory consumption).
2609 * This *must* be set if:
2610 * - Discarding parts of a RAM block does not result in the change being
2611 *   reflected in the VM and the pages getting freed.
2612 * - All memory in RAM blocks is pinned or duplicated, invalidating any previous
2613 *   guest action to unplug memory.
2614 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
2615 *   encrypted VMs).
2616 * Technologies that only temporarily pin the current working set of a
2617 * driver are fine, because we don't expect such pages to be discarded
2618 * (esp. based on guest action like balloon inflation).
2620 * This is *not* to be used to protect from concurrent discards (esp.,
2621 * postcopy).
2623 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
2624 * discards to work reliably is active.
2626 int ram_block_discard_disable(bool state);
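/*
 * Example (illustrative sketch): a hypothetical device that pins all guest
 * memory must block discards while it is realized and lift the inhibition
 * again on unrealize.
 *
 *   if (ram_block_discard_disable(true)) {
 *       error_setg(errp, "RAM discarding is in use; cannot pin all memory");
 *       return;
 *   }
 *   ...
 *   ram_block_discard_disable(false);
 */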
2629 * Inhibit technologies that disable discarding of pages in RAM blocks.
2631 * Returns 0 if successful. Returns -EBUSY if discards have already been
2632 * disabled (e.g., via ram_block_discard_disable()).
2634 int ram_block_discard_require(bool state);
2637 * Test if discarding of memory in ram blocks is disabled.
2639 bool ram_block_discard_is_disabled(void);
2642 * Test if discarding of memory in ram blocks is required to work reliably.
2644 bool ram_block_discard_is_required(void);