/* Unmap the whole notifier's range */
static void smmu_unmap_notifier_range(IOMMUNotifier *n)
{
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
- entry.target_as = &address_space_memory;
- entry.iova = n->start;
- entry.perm = IOMMU_NONE;
- entry.addr_mask = n->end - n->start;
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.target_as = &address_space_memory;
+ event.entry.iova = n->start;
+ event.entry.perm = IOMMU_NONE;
+ event.entry.addr_mask = n->end - n->start;
- memory_region_notify_iommu_one(n, &entry);
+ memory_region_notify_iommu_one(n, &event);
}
/* Unmap all notifiers attached to @mr */
uint8_t tg, uint64_t num_pages)
{
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
uint8_t granule = tg;
if (!tg) {
granule = tt->granule_sz;
}
- entry.target_as = &address_space_memory;
- entry.iova = iova;
- entry.addr_mask = num_pages * (1 << granule) - 1;
- entry.perm = IOMMU_NONE;
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.target_as = &address_space_memory;
+ event.entry.iova = iova;
+ event.entry.addr_mask = num_pages * (1 << granule) - 1;
+ event.entry.perm = IOMMU_NONE;
- memory_region_notify_iommu_one(n, &entry);
+ memory_region_notify_iommu_one(n, &event);
}
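The patch repeats this same UNMAP-event initialization at many call sites. For illustration only, a hypothetical helper (not part of the patch; a sketch assuming the IOMMUTLBEvent layout introduced in memory.h below) captures the shared pattern:

    /* Sketch only: build an UNMAP event covering [iova, iova + addr_mask]
     * in the system address space; make_unmap_event() is hypothetical. */
    static IOMMUTLBEvent make_unmap_event(hwaddr iova, hwaddr addr_mask)
    {
        IOMMUTLBEvent event = {
            .type = IOMMU_NOTIFIER_UNMAP,
            .entry = {
                .target_as = &address_space_memory,
                .iova = iova,
                .addr_mask = addr_mask,
                .perm = IOMMU_NONE,
                .translated_addr = 0,
            },
        };
        return event;
    }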
/* invalidate an asid/iova range tuple in all mr's */
}
}
-typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
+typedef int (*vtd_page_walk_hook)(IOMMUTLBEvent *event, void *private);
/**
* Constant information used during page walking
uint16_t domain_id;
} vtd_page_walk_info;
-static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
+static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info)
{
VTDAddressSpace *as = info->as;
vtd_page_walk_hook hook_fn = info->hook_fn;
void *private = info->private;
+ IOMMUTLBEntry *entry = &event->entry;
DMAMap target = {
.iova = entry->iova,
.size = entry->addr_mask,
};
DMAMap *mapped = iova_tree_find(as->iova_tree, &target);
- if (entry->perm == IOMMU_NONE && !info->notify_unmap) {
+ if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) {
trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
return 0;
}
assert(hook_fn);
/* Update local IOVA mapped ranges */
- if (entry->perm) {
+ if (event->type == IOMMU_NOTIFIER_MAP) {
if (mapped) {
/* If it's exactly the same translation, skip */
if (!memcmp(mapped, &target, sizeof(target))) {
int ret;
/* Emulate an UNMAP */
+ event->type = IOMMU_NOTIFIER_UNMAP;
entry->perm = IOMMU_NONE;
trace_vtd_page_walk_one(info->domain_id,
entry->iova,
entry->translated_addr,
entry->addr_mask,
entry->perm);
- ret = hook_fn(entry, private);
+ ret = hook_fn(event, private);
if (ret) {
return ret;
}
/* Drop any existing mapping */
iova_tree_remove(as->iova_tree, &target);
- /* Recover the correct permission */
+ /* Recover the correct type */
+ event->type = IOMMU_NOTIFIER_MAP;
entry->perm = cache_perm;
}
}
trace_vtd_page_walk_one(info->domain_id, entry->iova,
entry->translated_addr, entry->addr_mask,
entry->perm);
- return hook_fn(entry, private);
+ return hook_fn(event, private);
}
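The MAP path above also records the new range in the per-address-space IOVA tree. For context, a sketch of that insertion, assuming the util/iova-tree API of this era (iova_tree_insert() taking a DMAMap pointer, like the iova_tree_find()/iova_tree_remove() calls above):

    /* Sketch: record a freshly mapped range (mirrors the elided MAP path). */
    DMAMap map = {
        .iova = entry->iova,
        .size = entry->addr_mask,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
    };
    iova_tree_insert(as->iova_tree, &map);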
/**
uint32_t offset;
uint64_t slpte;
uint64_t subpage_size, subpage_mask;
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
uint64_t iova = start;
uint64_t iova_next;
int ret = 0;
*
* In either case, we send an IOTLB notification down.
*/
- entry.target_as = &address_space_memory;
- entry.iova = iova & subpage_mask;
- entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
- entry.addr_mask = ~subpage_mask;
+ event.entry.target_as = &address_space_memory;
+ event.entry.iova = iova & subpage_mask;
+ event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
+ event.entry.addr_mask = ~subpage_mask;
/* NOTE: this is only meaningful if entry_valid == true */
- entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
- ret = vtd_page_walk_one(&entry, info);
+ event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
+ event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP :
+ IOMMU_NOTIFIER_UNMAP;
+ ret = vtd_page_walk_one(&event, info);
}
if (ret < 0) {
return ret;
}
-static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry,
+static int vtd_sync_shadow_page_hook(IOMMUTLBEvent *event,
void *private)
{
- memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry);
+ memory_region_notify_iommu(private, 0, *event);
return 0;
}
* page tables. We just deliver the PSI down to
* invalidate caches.
*/
- IOMMUTLBEntry entry = {
- .target_as = &address_space_memory,
- .iova = addr,
- .translated_addr = 0,
- .addr_mask = size - 1,
- .perm = IOMMU_NONE,
+ IOMMUTLBEvent event = {
+ .type = IOMMU_NOTIFIER_UNMAP,
+ .entry = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = 0,
+ .addr_mask = size - 1,
+ .perm = IOMMU_NONE,
+ },
};
- memory_region_notify_iommu(&vtd_as->iommu, 0, entry);
+ memory_region_notify_iommu(&vtd_as->iommu, 0, event);
}
}
}
VTDInvDesc *inv_desc)
{
VTDAddressSpace *vtd_dev_as;
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
struct VTDBus *vtd_bus;
hwaddr addr;
uint64_t sz;
sz = VTD_PAGE_SIZE;
}
- entry.target_as = &vtd_dev_as->as;
- entry.addr_mask = sz - 1;
- entry.iova = addr;
- entry.perm = IOMMU_NONE;
- entry.translated_addr = 0;
- memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry);
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.target_as = &vtd_dev_as->as;
+ event.entry.addr_mask = sz - 1;
+ event.entry.iova = addr;
+ event.entry.perm = IOMMU_NONE;
+ event.entry.translated_addr = 0;
+ memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
done:
return true;
size = remain = end - start + 1;
while (remain >= VTD_PAGE_SIZE) {
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
assert(mask);
- entry.iova = start;
- entry.addr_mask = mask - 1;
- entry.target_as = &address_space_memory;
- entry.perm = IOMMU_NONE;
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.iova = start;
+ event.entry.addr_mask = mask - 1;
+ event.entry.target_as = &address_space_memory;
+ event.entry.perm = IOMMU_NONE;
/* This field is meaningless for unmap */
- entry.translated_addr = 0;
+ event.entry.translated_addr = 0;
- memory_region_notify_iommu_one(n, &entry);
+ memory_region_notify_iommu_one(n, &event);
start += mask;
remain -= mask;
vtd_switch_address_space_all(s);
}
-static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
+static int vtd_replay_hook(IOMMUTLBEvent *event, void *private)
{
- memory_region_notify_iommu_one((IOMMUNotifier *)private, entry);
+ memory_region_notify_iommu_one(private, event);
return 0;
}
/* Called when the LUT word at lutidx has changed from oldlut to newlut;
* must call the IOMMU notifiers for the changed blocks.
*/
- IOMMUTLBEntry entry = {
- .addr_mask = s->blocksize - 1,
+ IOMMUTLBEvent event = {
+ .entry = {
+ .addr_mask = s->blocksize - 1,
+ }
};
hwaddr addr = lutidx * s->blocksize * 32;
int i;
block_is_ns = newlut & (1 << i);
trace_tz_mpc_iommu_notify(addr);
- entry.iova = addr;
- entry.translated_addr = addr;
+ event.entry.iova = addr;
+ event.entry.translated_addr = addr;
- entry.perm = IOMMU_NONE;
- memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
- memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.perm = IOMMU_NONE;
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);
- entry.perm = IOMMU_RW;
+ event.type = IOMMU_NOTIFIER_MAP;
+ event.entry.perm = IOMMU_RW;
if (block_is_ns) {
- entry.target_as = &s->blocked_io_as;
+ event.entry.target_as = &s->blocked_io_as;
} else {
- entry.target_as = &s->downstream_as;
+ event.entry.target_as = &s->downstream_as;
}
- memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, event);
if (block_is_ns) {
- entry.target_as = &s->downstream_as;
+ event.entry.target_as = &s->downstream_as;
} else {
- entry.target_as = &s->blocked_io_as;
+ event.entry.target_as = &s->blocked_io_as;
}
- memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
+ memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, event);
}
}
static target_ulong put_tce_emu(SpaprTceTable *tcet, target_ulong ioba,
target_ulong tce)
{
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;
tcet->table[index] = tce;
- entry.target_as = &address_space_memory,
- entry.iova = (ioba - tcet->bus_offset) & page_mask;
- entry.translated_addr = tce & page_mask;
- entry.addr_mask = ~page_mask;
- entry.perm = spapr_tce_iommu_access_flags(tce);
- memory_region_notify_iommu(&tcet->iommu, 0, entry);
+ event.entry.target_as = &address_space_memory;
+ event.entry.iova = (ioba - tcet->bus_offset) & page_mask;
+ event.entry.translated_addr = tce & page_mask;
+ event.entry.addr_mask = ~page_mask;
+ event.entry.perm = spapr_tce_iommu_access_flags(tce);
+ event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP;
+ memory_region_notify_iommu(&tcet->iommu, 0, event);
return H_SUCCESS;
}
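The event type here is derived from the permission bits, preserving the old convention that perm == IOMMU_NONE signalled an invalidation. The same ternary appears in the intel-iommu and s390 hunks; as a sketch, the rule is simply:

    /* Sketch (hypothetical helper, not in this patch): perm-to-type rule. */
    static IOMMUNotifierFlag event_type_for_perm(IOMMUAccessFlags perm)
    {
        return perm == IOMMU_NONE ? IOMMU_NOTIFIER_UNMAP : IOMMU_NOTIFIER_MAP;
    }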
S390IOTLBEntry *entry)
{
S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
- IOMMUTLBEntry notify = {
- .target_as = &address_space_memory,
- .iova = entry->iova,
- .translated_addr = entry->translated_addr,
- .perm = entry->perm,
- .addr_mask = ~PAGE_MASK,
+ IOMMUTLBEvent event = {
+ .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
+ .entry = {
+ .target_as = &address_space_memory,
+ .iova = entry->iova,
+ .translated_addr = entry->translated_addr,
+ .perm = entry->perm,
+ .addr_mask = ~PAGE_MASK,
+ },
};
- if (entry->perm == IOMMU_NONE) {
+ if (event.type == IOMMU_NOTIFIER_UNMAP) {
if (!cache) {
goto out;
}
goto out;
}
- notify.perm = IOMMU_NONE;
- memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
- notify.perm = entry->perm;
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.perm = IOMMU_NONE;
+ memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
+ event.type = IOMMU_NOTIFIER_MAP;
+ event.entry.perm = entry->perm;
}
cache = g_new(S390IOTLBEntry, 1);
dec_dma_avail(iommu);
}
- memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
+ memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
out:
return iommu->dma_limit ? iommu->dma_limit->avail : 1;
hwaddr virt_end, hwaddr paddr,
uint32_t flags)
{
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
flags & VIRTIO_IOMMU_MAP_F_WRITE);
trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
paddr, perm);
- entry.target_as = &address_space_memory;
- entry.addr_mask = virt_end - virt_start;
- entry.iova = virt_start;
- entry.perm = perm;
- entry.translated_addr = paddr;
+ event.type = IOMMU_NOTIFIER_MAP;
+ event.entry.target_as = &address_space_memory;
+ event.entry.addr_mask = virt_end - virt_start;
+ event.entry.iova = virt_start;
+ event.entry.perm = perm;
+ event.entry.translated_addr = paddr;
- memory_region_notify_iommu(mr, 0, entry);
+ memory_region_notify_iommu(mr, 0, event);
}
static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
hwaddr virt_end)
{
- IOMMUTLBEntry entry;
+ IOMMUTLBEvent event;
if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
return;
trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);
- entry.target_as = &address_space_memory;
- entry.addr_mask = virt_end - virt_start;
- entry.iova = virt_start;
- entry.perm = IOMMU_NONE;
- entry.translated_addr = 0;
+ event.type = IOMMU_NOTIFIER_UNMAP;
+ event.entry.target_as = &address_space_memory;
+ event.entry.addr_mask = virt_end - virt_start;
+ event.entry.iova = virt_start;
+ event.entry.perm = IOMMU_NONE;
+ event.entry.translated_addr = 0;
- memory_region_notify_iommu(mr, 0, entry);
+ memory_region_notify_iommu(mr, 0, event);
}
static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
};
typedef struct IOMMUNotifier IOMMUNotifier;
+typedef struct IOMMUTLBEvent {
+ IOMMUNotifierFlag type;
+ IOMMUTLBEntry entry;
+} IOMMUTLBEvent;
+
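With this structure, a producer states the notification type explicitly instead of encoding it in the permission bits. A minimal sketch of filling an event for a 4 KiB read/write mapping (iova and paddr are hypothetical):

    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_MAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = iova,
            .translated_addr = paddr,
            .addr_mask = 0xfff,        /* 4 KiB page */
            .perm = IOMMU_RW,
        },
    };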
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)
/**
* memory_region_notify_iommu: notify a change in an IOMMU translation entry.
*
- * The notification type will be decided by entry.perm bits:
- *
- * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
- * - For MAP (newly added entry) notifies: set entry.perm to the
- * permission of the page (which is definitely !IOMMU_NONE).
- *
* Note: for any IOMMU implementation, an in-place mapping change
* should be notified with an UNMAP followed by a MAP.
*
* @iommu_mr: the memory region that was changed
* @iommu_idx: the IOMMU index for the translation table which has changed
- * @entry: the new entry in the IOMMU translation table. The entry
- * replaces all old entries for the same virtual I/O address range.
- * Deleted entries have .@perm == 0.
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
*/
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
int iommu_idx,
- IOMMUTLBEntry entry);
+ IOMMUTLBEvent event);
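Per the note above, an in-place change is delivered as an UNMAP followed by a MAP, exactly as the s390 hunk does. A sketch (iommu_mr, iova, mask and new_paddr are hypothetical):

    /* Sketch: notify an in-place remap as UNMAP + MAP. */
    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = iova,
            .addr_mask = mask,
            .perm = IOMMU_NONE,
        },
    };
    memory_region_notify_iommu(iommu_mr, 0, event);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.perm = IOMMU_RW;
    event.entry.translated_addr = new_paddr;
    memory_region_notify_iommu(iommu_mr, 0, event);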
/**
* memory_region_notify_iommu_one: notify a change in an IOMMU translation
* notifies a specific notifier, not all of them.
*
* @notifier: the notifier to be notified
- * @entry: the new entry in the IOMMU translation table. The entry
- * replaces all old entries for the same virtual I/O address range.
- * Deleted entries have .@perm == 0.
+ * @event: TLB event with the new entry in the IOMMU translation table.
+ * The entry replaces all old entries for the same virtual I/O address
+ * range.
*/
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
- IOMMUTLBEntry *entry);
+ IOMMUTLBEvent *event);
/**
* memory_region_register_iommu_notifier: register a notifier for changes to
}
void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
- IOMMUTLBEntry *entry)
+ IOMMUTLBEvent *event)
{
- IOMMUNotifierFlag request_flags;
+ IOMMUTLBEntry *entry = &event->entry;
hwaddr entry_end = entry->iova + entry->addr_mask;
+ if (event->type == IOMMU_NOTIFIER_UNMAP) {
+ assert(entry->perm == IOMMU_NONE);
+ }
+
/*
* Skip the notification if the notification does not overlap
* with registered range.
assert(entry->iova >= notifier->start && entry_end <= notifier->end);
- if (entry->perm & IOMMU_RW) {
- request_flags = IOMMU_NOTIFIER_MAP;
- } else {
- request_flags = IOMMU_NOTIFIER_UNMAP;
- }
-
- if (notifier->notifier_flags & request_flags) {
+ if (event->type & notifier->notifier_flags) {
notifier->notify(notifier, entry);
}
}
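Consumers are unchanged by this patch: the callback installed with iommu_notifier_init() still receives an IOMMUTLBEntry, and the new event type is only matched against notifier_flags above. A sketch of registering a MAP-only notifier (my_map_notify and my_register are hypothetical names):

    static void my_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
    {
        /* Only MAP events reach here, so entry->perm != IOMMU_NONE. */
    }

    static void my_register(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
    {
        iommu_notifier_init(n, my_map_notify, IOMMU_NOTIFIER_MAP,
                            0, HWADDR_MAX, 0 /* iommu_idx */);
        memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), n,
                                              &error_fatal);
    }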
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
int iommu_idx,
- IOMMUTLBEntry entry)
+ IOMMUTLBEvent event)
{
IOMMUNotifier *iommu_notifier;
IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
if (iommu_notifier->iommu_idx == iommu_idx) {
- memory_region_notify_iommu_one(iommu_notifier, &entry);
+ memory_region_notify_iommu_one(iommu_notifier, &event);
}
}
}