4 * Copyright Red Hat, Inc. 2010
7 * Michael S. Tsirkin <mst@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "hw/virtio/vhost.h"
19 #include "qemu/atomic.h"
20 #include "qemu/range.h"
21 #include "qemu/error-report.h"
22 #include "qemu/memfd.h"
24 #include "standard-headers/linux/vhost_types.h"
25 #include "hw/virtio/virtio-bus.h"
26 #include "migration/blocker.h"
27 #include "migration/qemu-file-types.h"
28 #include "sysemu/dma.h"
31 /* enabled until disconnected backend stabilizes */
32 #define _VHOST_DEBUG 1
35 #define VHOST_OPS_DEBUG(retval, fmt, ...) \
37 error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38 strerror(-retval), -retval); \
41 #define VHOST_OPS_DEBUG(retval, fmt, ...) \
45 static struct vhost_log *vhost_log;
46 static struct vhost_log *vhost_log_shm;
48 static unsigned int used_memslots;
49 static QLIST_HEAD(, vhost_dev) vhost_devices =
50 QLIST_HEAD_INITIALIZER(vhost_devices);
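/*
 * Return true if at least one more memory slot can be registered:
 * compare the slots currently in use against the smallest backend
 * limit across all registered vhost devices.
 */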
52 bool vhost_has_free_slot(void)
54 unsigned int slots_limit = ~0U;
55 struct vhost_dev *hdev;
57 QLIST_FOREACH(hdev, &vhost_devices, entry) {
58 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
59 slots_limit = MIN(slots_limit, r);
61 return slots_limit > used_memslots;
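/*
 * Sync dirty pages for the part of the log that covers both the memory
 * section [mfirst, mlast] and the range [rfirst, rlast].  The log is an
 * array of chunks; each set bit in a chunk marks one VHOST_LOG_PAGE sized
 * page as dirty, which is forwarded to QEMU's dirty bitmap via
 * memory_region_set_dirty().
 */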
64 static void vhost_dev_sync_region(struct vhost_dev *dev,
65 MemoryRegionSection *section,
66 uint64_t mfirst, uint64_t mlast,
67 uint64_t rfirst, uint64_t rlast)
69 vhost_log_chunk_t *log = dev->log->log;
71 uint64_t start = MAX(mfirst, rfirst);
72 uint64_t end = MIN(mlast, rlast);
73 vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
74 vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
75 uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
80 assert(end / VHOST_LOG_CHUNK < dev->log_size);
81 assert(start / VHOST_LOG_CHUNK < dev->log_size);
83 for (;from < to; ++from) {
84 vhost_log_chunk_t log;
85 /* We first check with non-atomic: much cheaper,
86 * and we expect non-dirty to be the common case. */
88 addr += VHOST_LOG_CHUNK;
91 /* Data must be read atomically. We don't really need barrier semantics
92 * but it's easier to use atomic_* than roll our own. */
93 log = qatomic_xchg(from, 0);
97 hwaddr section_offset;
99 page_addr = addr + bit * VHOST_LOG_PAGE;
100 section_offset = page_addr - section->offset_within_address_space;
101 mr_offset = section_offset + section->offset_within_region;
102 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
103 log &= ~(0x1ull << bit);
105 addr += VHOST_LOG_CHUNK;
109 bool vhost_dev_has_iommu(struct vhost_dev *dev)
111 VirtIODevice *vdev = dev->vdev;
114 * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports an
115 * incremental memory mapping API via the IOTLB API. For platforms that
116 * do not have an IOMMU, there is no need to enable this feature,
117 * which may cause unnecessary IOTLB miss/update transactions.
120 return virtio_bus_device_iommu_enabled(vdev) &&
121 virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
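/*
 * Sync the dirty log for one memory section: once against every region in
 * the current memory table and once against every used ring (translated
 * through the IOMMU when the device uses one).
 */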
127 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
128 MemoryRegionSection *section,
136 if (!dev->log_enabled || !dev->started) {
139 start_addr = section->offset_within_address_space;
140 end_addr = range_get_last(start_addr, int128_get64(section->size));
141 start_addr = MAX(first, start_addr);
142 end_addr = MIN(last, end_addr);
144 for (i = 0; i < dev->mem->nregions; ++i) {
145 struct vhost_memory_region *reg = dev->mem->regions + i;
146 vhost_dev_sync_region(dev, section, start_addr, end_addr,
147 reg->guest_phys_addr,
148 range_get_last(reg->guest_phys_addr,
151 for (i = 0; i < dev->nvqs; ++i) {
152 struct vhost_virtqueue *vq = dev->vqs + i;
154 if (!vq->used_phys && !vq->used_size) {
158 if (vhost_dev_has_iommu(dev)) {
160 hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
161 hwaddr phys, s, offset;
165 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
168 MEMTXATTRS_UNSPECIFIED);
171 if (!iotlb.target_as) {
172 qemu_log_mask(LOG_GUEST_ERROR, "translation "
173 "failure for used_iova %"PRIx64"\n",
178 offset = used_phys & iotlb.addr_mask;
179 phys = iotlb.translated_addr + offset;
182 * Distance from start of used ring until last byte of IOMMU page.
185 s = iotlb.addr_mask - offset;
187 * Size of used ring, or of the part of it until end
188 * of IOMMU page. To avoid a zero result, do the +1 outside of MIN().
191 s = MIN(s, used_size - 1) + 1;
193 vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
194 range_get_last(phys, s));
199 vhost_dev_sync_region(dev, section, start_addr,
200 end_addr, vq->used_phys,
201 range_get_last(vq->used_phys, vq->used_size));
207 static void vhost_log_sync(MemoryListener *listener,
208 MemoryRegionSection *section)
210 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
212 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
215 static void vhost_log_sync_range(struct vhost_dev *dev,
216 hwaddr first, hwaddr last)
219 /* FIXME: this is N^2 in number of sections */
220 for (i = 0; i < dev->n_mem_sections; ++i) {
221 MemoryRegionSection *section = &dev->mem_sections[i];
222 vhost_sync_dirty_bitmap(dev, section, first, last);
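/*
 * Size of the dirty log, in chunks, needed to cover the highest guest
 * physical address of any region in the current memory table.
 */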
226 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
228 uint64_t log_size = 0;
230 for (i = 0; i < dev->mem->nregions; ++i) {
231 struct vhost_memory_region *reg = dev->mem->regions + i;
232 uint64_t last = range_get_last(reg->guest_phys_addr,
234 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
239 static int vhost_set_backend_type(struct vhost_dev *dev,
240 VhostBackendType backend_type)
244 switch (backend_type) {
245 #ifdef CONFIG_VHOST_KERNEL
246 case VHOST_BACKEND_TYPE_KERNEL:
247 dev->vhost_ops = &kernel_ops;
250 #ifdef CONFIG_VHOST_USER
251 case VHOST_BACKEND_TYPE_USER:
252 dev->vhost_ops = &user_ops;
255 #ifdef CONFIG_VHOST_VDPA
256 case VHOST_BACKEND_TYPE_VDPA:
257 dev->vhost_ops = &vdpa_ops;
261 error_report("Unknown vhost backend type");
268 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
271 struct vhost_log *log;
272 uint64_t logsize = size * sizeof(*(log->log));
275 log = g_new0(struct vhost_log, 1);
277 log->log = qemu_memfd_alloc("vhost-log", logsize,
278 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
281 error_report_err(err);
285 memset(log->log, 0, logsize);
287 log->log = g_malloc0(logsize);
297 static struct vhost_log *vhost_log_get(uint64_t size, bool share)
299 struct vhost_log *log = share ? vhost_log_shm : vhost_log;
301 if (!log || log->size != size) {
302 log = vhost_log_alloc(size, share);
315 static void vhost_log_put(struct vhost_dev *dev, bool sync)
317 struct vhost_log *log = dev->log;
324 if (log->refcnt == 0) {
325 /* Sync only the range covered by the old log */
326 if (dev->log_size && sync) {
327 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
330 if (vhost_log == log) {
333 } else if (vhost_log_shm == log) {
334 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
336 vhost_log_shm = NULL;
346 static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
348 return dev->vhost_ops->vhost_requires_shm_log &&
349 dev->vhost_ops->vhost_requires_shm_log(dev);
352 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
354 struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
355 uint64_t log_base = (uintptr_t)log->log;
358 /* inform backend of log switching; this must be done before
359 releasing the current log, to ensure no logging is lost */
360 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
362 VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
365 vhost_log_put(dev, true);
367 dev->log_size = size;
370 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
371 hwaddr *plen, bool is_write)
373 if (!vhost_dev_has_iommu(dev)) {
374 return cpu_physical_memory_map(addr, plen, is_write);
376 return (void *)(uintptr_t)addr;
380 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
381 hwaddr len, int is_write,
384 if (!vhost_dev_has_iommu(dev)) {
385 cpu_physical_memory_unmap(buffer, len, is_write, access_len);
389 static int vhost_verify_ring_part_mapping(void *ring_hva,
396 uint64_t hva_ring_offset;
397 uint64_t ring_last = range_get_last(ring_gpa, ring_size);
398 uint64_t reg_last = range_get_last(reg_gpa, reg_size);
400 if (ring_last < reg_gpa || ring_gpa > reg_last) {
403 /* check that the whole ring is mapped */
404 if (ring_last > reg_last) {
407 /* check that ring's MemoryRegion wasn't replaced */
408 hva_ring_offset = ring_gpa - reg_gpa;
409 if (ring_hva != reg_hva + hva_ring_offset) {
416 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
423 const char *part_name[] = {
429 if (vhost_dev_has_iommu(dev)) {
433 for (i = 0; i < dev->nvqs; ++i) {
434 struct vhost_virtqueue *vq = dev->vqs + i;
436 if (vq->desc_phys == 0) {
441 r = vhost_verify_ring_part_mapping(
442 vq->desc, vq->desc_phys, vq->desc_size,
443 reg_hva, reg_gpa, reg_size);
449 r = vhost_verify_ring_part_mapping(
450 vq->avail, vq->avail_phys, vq->avail_size,
451 reg_hva, reg_gpa, reg_size);
457 r = vhost_verify_ring_part_mapping(
458 vq->used, vq->used_phys, vq->used_size,
459 reg_hva, reg_gpa, reg_size);
466 error_report("Unable to map %s for ring %d", part_name[j], i);
467 } else if (r == -EBUSY) {
468 error_report("%s relocated for ring %d", part_name[j], i);
474 * vhost_section: identify sections needed for vhost access
476 * We only care about RAM sections here (where virtqueue and guest
477 * internals accessed by virtio might live). If we find one we still
478 * allow the backend to potentially filter it out of our list.
480 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
482 MemoryRegion *mr = section->mr;
484 if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
485 uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
486 uint8_t handled_dirty;
489 * Kernel-based vhost doesn't handle any block which is doing
490 * dirty-tracking other than migration, for which it has
491 * specific logging support. However, for TCG the kernel never
492 * gets involved anyway, so we can also ignore its
493 * self-modifying code detection flags. A vhost-user
494 * client could still confuse a TCG guest, though, if it rewrites
495 * executable memory that has already been translated.
497 handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
498 (1 << DIRTY_MEMORY_CODE);
500 if (dirty_mask & ~handled_dirty) {
501 trace_vhost_reject_section(mr->name, 1);
505 if (dev->vhost_ops->vhost_backend_mem_section_filter &&
506 !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
507 trace_vhost_reject_section(mr->name, 2);
511 trace_vhost_section(mr->name);
514 trace_vhost_reject_section(mr->name, 3);
519 static void vhost_begin(MemoryListener *listener)
521 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
523 dev->tmp_sections = NULL;
524 dev->n_tmp_sections = 0;
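/*
 * End of a memory transaction: compare the freshly built section list with
 * the previous one and, if anything changed, rebuild the vhost_memory
 * region table, grow the dirty log if required, and push the new table to
 * the backend with vhost_set_mem_table().
 */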
527 static void vhost_commit(MemoryListener *listener)
529 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
531 MemoryRegionSection *old_sections;
537 bool changed = false;
539 /* Note we can be called before the device is started, but then
540 * starting the device calls set_mem_table, so we need to have
541 * built the data structures.
543 old_sections = dev->mem_sections;
544 n_old_sections = dev->n_mem_sections;
545 dev->mem_sections = dev->tmp_sections;
546 dev->n_mem_sections = dev->n_tmp_sections;
548 if (dev->n_mem_sections != n_old_sections) {
551 /* Same size, let's check the contents */
552 for (int i = 0; i < n_old_sections; i++) {
553 if (!MemoryRegionSection_eq(&old_sections[i],
554 &dev->mem_sections[i])) {
561 trace_vhost_commit(dev->started, changed);
566 /* Rebuild the regions list from the new sections list */
567 regions_size = offsetof(struct vhost_memory, regions) +
568 dev->n_mem_sections * sizeof dev->mem->regions[0];
569 dev->mem = g_realloc(dev->mem, regions_size);
570 dev->mem->nregions = dev->n_mem_sections;
571 used_memslots = dev->mem->nregions;
572 for (i = 0; i < dev->n_mem_sections; i++) {
573 struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
574 struct MemoryRegionSection *mrs = dev->mem_sections + i;
576 cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
577 cur_vmr->memory_size = int128_get64(mrs->size);
578 cur_vmr->userspace_addr =
579 (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
580 mrs->offset_within_region;
581 cur_vmr->flags_padding = 0;
588 for (i = 0; i < dev->mem->nregions; i++) {
589 if (vhost_verify_ring_mappings(dev,
590 (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
591 dev->mem->regions[i].guest_phys_addr,
592 dev->mem->regions[i].memory_size)) {
593 error_report("Verify ring failure on region %d", i);
598 if (!dev->log_enabled) {
599 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
601 VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
605 log_size = vhost_get_log_size(dev);
606 /* We allocate an extra 4K bytes of log
607 * to reduce the number of reallocations. */
608 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
609 /* To log more, must increase log size before table update. */
610 if (dev->log_size < log_size) {
611 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
613 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
615 VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
617 /* To log less, can only decrease log size after table update. */
618 if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
619 vhost_dev_log_resize(dev, log_size);
623 /* Deref the old list of sections, this must happen _after_ the
624 * vhost_set_mem_table to ensure the client isn't still using the
625 * section we're about to unref.
627 while (n_old_sections--) {
628 memory_region_unref(old_sections[n_old_sections].mr);
630 g_free(old_sections);
634 /* Adds the section data to the tmp_section structure.
635 * It relies on the listener calling us in memory address order
636 * and on being called for each region (via the _add and _nop methods),
 * so that neighbouring sections can be merged into a single entry. */
639 static void vhost_region_add_section(struct vhost_dev *dev,
640 MemoryRegionSection *section)
642 bool need_add = true;
643 uint64_t mrs_size = int128_get64(section->size);
644 uint64_t mrs_gpa = section->offset_within_address_space;
645 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
646 section->offset_within_region;
647 RAMBlock *mrs_rb = section->mr->ram_block;
649 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
652 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
653 /* Round the section to its page size */
654 /* First align the start down to a page boundary */
655 size_t mrs_page = qemu_ram_pagesize(mrs_rb);
656 uint64_t alignage = mrs_host & (mrs_page - 1);
658 mrs_host -= alignage;
659 mrs_size += alignage;
662 /* Now align the size up to a page boundary */
663 alignage = mrs_size & (mrs_page - 1);
665 mrs_size += mrs_page - alignage;
667 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
671 if (dev->n_tmp_sections) {
672 /* Since we already have at least one section, let's see if
673 * this extends it; since we're scanning in order, we only
674 * have to look at the last one, and the FlatView that calls
675 * us shouldn't have overlaps.
677 MemoryRegionSection *prev_sec = dev->tmp_sections +
678 (dev->n_tmp_sections - 1);
679 uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
680 uint64_t prev_size = int128_get64(prev_sec->size);
681 uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
682 uint64_t prev_host_start =
683 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
684 prev_sec->offset_within_region;
685 uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);
687 if (mrs_gpa <= (prev_gpa_end + 1)) {
688 /* OK, looks like overlapping/intersecting - it's possible that
689 * the rounding to page sizes has made them overlap, but they should
690 * match up in the same RAMBlock if they do.
692 if (mrs_gpa < prev_gpa_start) {
693 error_report("%s:Section '%s' rounded to %"PRIx64
694 " prior to previous '%s' %"PRIx64,
695 __func__, section->mr->name, mrs_gpa,
696 prev_sec->mr->name, prev_gpa_start);
697 /* A way to cleanly fail here would be better */
700 /* Offset from the start of the previous GPA to this GPA */
701 size_t offset = mrs_gpa - prev_gpa_start;
703 if (prev_host_start + offset == mrs_host &&
704 section->mr == prev_sec->mr &&
705 (!dev->vhost_ops->vhost_backend_can_merge ||
706 dev->vhost_ops->vhost_backend_can_merge(dev,
708 prev_host_start, prev_size))) {
709 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
711 prev_sec->offset_within_address_space =
712 MIN(prev_gpa_start, mrs_gpa);
713 prev_sec->offset_within_region =
714 MIN(prev_host_start, mrs_host) -
715 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
716 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
718 trace_vhost_region_add_section_merge(section->mr->name,
719 int128_get64(prev_sec->size),
720 prev_sec->offset_within_address_space,
721 prev_sec->offset_within_region);
723 /* adjoining regions are fine, but overlapping ones with
724 * different blocks/offsets shouldn't happen
726 if (mrs_gpa != prev_gpa_end + 1) {
727 error_report("%s: Overlapping but not coherent sections "
737 ++dev->n_tmp_sections;
738 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
739 dev->n_tmp_sections);
740 dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
741 /* The flatview isn't stable and we don't use it; setting it to NULL
742 * means we can memcmp the list.
744 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
745 memory_region_ref(section->mr);
749 /* Used for both add and nop callbacks */
750 static void vhost_region_addnop(MemoryListener *listener,
751 MemoryRegionSection *section)
753 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
756 if (!vhost_section(dev, section)) {
759 vhost_region_add_section(dev, section);
762 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
764 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
765 struct vhost_dev *hdev = iommu->hdev;
766 hwaddr iova = iotlb->iova + iommu->iommu_offset;
768 if (vhost_backend_invalidate_device_iotlb(hdev, iova,
769 iotlb->addr_mask + 1)) {
770 error_report("Fail to invalidate device iotlb");
774 static void vhost_iommu_region_add(MemoryListener *listener,
775 MemoryRegionSection *section)
777 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
779 struct vhost_iommu *iommu;
782 IOMMUMemoryRegion *iommu_mr;
785 if (!memory_region_is_iommu(section->mr)) {
789 iommu_mr = IOMMU_MEMORY_REGION(section->mr);
791 iommu = g_malloc0(sizeof(*iommu));
792 end = int128_add(int128_make64(section->offset_within_region),
794 end = int128_sub(end, int128_one());
795 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
796 MEMTXATTRS_UNSPECIFIED);
797 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
798 IOMMU_NOTIFIER_DEVIOTLB_UNMAP,
799 section->offset_within_region,
802 iommu->mr = section->mr;
803 iommu->iommu_offset = section->offset_within_address_space -
804 section->offset_within_region;
806 ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
809 * Some vIOMMUs do not support dev-iotlb yet. If so, try to use the
810 * UNMAP legacy message
812 iommu->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
813 memory_region_register_iommu_notifier(section->mr, &iommu->n,
816 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
817 /* TODO: can replay help performance here? */
820 static void vhost_iommu_region_del(MemoryListener *listener,
821 MemoryRegionSection *section)
823 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
825 struct vhost_iommu *iommu;
827 if (!memory_region_is_iommu(section->mr)) {
831 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
832 if (iommu->mr == section->mr &&
833 iommu->n.start == section->offset_within_region) {
834 memory_region_unregister_iommu_notifier(iommu->mr,
836 QLIST_REMOVE(iommu, iommu_next);
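/*
 * Program one virtqueue's descriptor, avail and used ring addresses into
 * the backend, along with the guest physical address used for dirty
 * logging of the used ring.
 */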
843 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
844 struct vhost_virtqueue *vq,
845 unsigned idx, bool enable_log)
847 struct vhost_vring_addr addr;
849 memset(&addr, 0, sizeof(struct vhost_vring_addr));
851 if (dev->vhost_ops->vhost_vq_get_addr) {
852 r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
854 VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
858 addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
859 addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
860 addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
863 addr.log_guest_addr = vq->used_phys;
864 addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
865 r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
867 VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
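/*
 * Negotiate features with the backend: start from the acked features, add
 * VHOST_F_LOG_ALL when dirty logging is requested, and mask
 * VIRTIO_F_IOMMU_PLATFORM when the device has no IOMMU (unless the backend
 * forces it on).
 */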
872 static int vhost_dev_set_features(struct vhost_dev *dev,
875 uint64_t features = dev->acked_features;
878 features |= 0x1ULL << VHOST_F_LOG_ALL;
880 if (!vhost_dev_has_iommu(dev)) {
881 features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
883 if (dev->vhost_ops->vhost_force_iommu) {
884 if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
885 features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
888 r = dev->vhost_ops->vhost_set_features(dev, features);
890 VHOST_OPS_DEBUG(r, "vhost_set_features failed");
893 if (dev->vhost_ops->vhost_set_backend_cap) {
894 r = dev->vhost_ops->vhost_set_backend_cap(dev);
896 VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
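/*
 * Switch dirty logging on or off for the whole device: re-negotiate the
 * feature bits, then re-program the ring addresses of every started
 * virtqueue with the new log flag, rolling back the already updated queues
 * on failure.
 */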
905 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
910 r = vhost_dev_set_features(dev, enable_log);
914 for (i = 0; i < dev->nvqs; ++i) {
915 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
916 addr = virtio_queue_get_desc_addr(dev->vdev, idx);
919 * The queue might not be ready for start. If this
920 * is the case, there is no reason to continue the process.
921 * Similar logic is used by vhost_virtqueue_start().
926 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
934 for (; i >= 0; --i) {
935 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
936 addr = virtio_queue_get_desc_addr(dev->vdev, idx);
940 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
943 vhost_dev_set_features(dev, dev->log_enabled);
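/*
 * Called from the log_global_start/stop listener callbacks below: enable or
 * disable dirty logging for migration, resizing or releasing the log as
 * needed.
 */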
948 static int vhost_migration_log(MemoryListener *listener, bool enable)
950 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
953 if (enable == dev->log_enabled) {
957 dev->log_enabled = enable;
963 r = vhost_dev_set_log(dev, false);
965 goto check_dev_state;
967 vhost_log_put(dev, false);
969 vhost_dev_log_resize(dev, vhost_get_log_size(dev));
970 r = vhost_dev_set_log(dev, true);
972 goto check_dev_state;
977 dev->log_enabled = enable;
979 * vhost-user-* devices could change their state during log
980 * initialization due to disconnect. So check dev state after
981 * vhost communication.
985 * Since device is in the stopped state, it is okay for
986 * migration. Return success.
991 /* An error occurred. */
992 dev->log_enabled = false;
998 static void vhost_log_global_start(MemoryListener *listener)
1002 r = vhost_migration_log(listener, true);
1008 static void vhost_log_global_stop(MemoryListener *listener)
1012 r = vhost_migration_log(listener, false);
1018 static void vhost_log_start(MemoryListener *listener,
1019 MemoryRegionSection *section,
1022 /* FIXME: implement */
1025 static void vhost_log_stop(MemoryListener *listener,
1026 MemoryRegionSection *section,
1029 /* FIXME: implement */
1032 /* The vhost driver natively knows how to handle the vrings of non
1033 * cross-endian legacy devices and modern devices. Only legacy devices
1034 * exposed to a bi-endian guest may require the vhost driver to use a
1035 * specific endianness.
1037 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
1039 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1043 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
1045 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
1049 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
1054 struct vhost_vring_state s = {
1055 .index = vhost_vq_index,
1056 .num = is_big_endian
1059 r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
1061 VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
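/*
 * Translate a guest physical address into the backend's userspace address
 * using the current memory region table; *len is set to the remaining
 * length of the containing region.
 */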
1066 static int vhost_memory_region_lookup(struct vhost_dev *hdev,
1067 uint64_t gpa, uint64_t *uaddr,
1072 for (i = 0; i < hdev->mem->nregions; i++) {
1073 struct vhost_memory_region *reg = hdev->mem->regions + i;
1075 if (gpa >= reg->guest_phys_addr &&
1076 reg->guest_phys_addr + reg->memory_size > gpa) {
1077 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
1078 *len = reg->guest_phys_addr + reg->memory_size - gpa;
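/*
 * Handle a missing IOTLB entry for the backend: translate the iova through
 * the device's DMA address space, look up the matching userspace address in
 * the memory table and push the resulting entry to the backend with
 * vhost_backend_update_device_iotlb().
 */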
1086 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
1088 IOMMUTLBEntry iotlb;
1089 uint64_t uaddr, len;
1092 RCU_READ_LOCK_GUARD();
1094 trace_vhost_iotlb_miss(dev, 1);
1096 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
1098 MEMTXATTRS_UNSPECIFIED);
1099 if (iotlb.target_as != NULL) {
1100 ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
1103 trace_vhost_iotlb_miss(dev, 3);
1104 error_report("Fail to lookup the translated address "
1105 "%"PRIx64, iotlb.translated_addr);
1109 len = MIN(iotlb.addr_mask + 1, len);
1110 iova = iova & ~iotlb.addr_mask;
1112 ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
1115 trace_vhost_iotlb_miss(dev, 4);
1116 error_report("Fail to update device iotlb");
1121 trace_vhost_iotlb_miss(dev, 2);
1127 int vhost_virtqueue_start(struct vhost_dev *dev,
1128 struct VirtIODevice *vdev,
1129 struct vhost_virtqueue *vq,
1132 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1133 VirtioBusState *vbus = VIRTIO_BUS(qbus);
1134 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1137 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1138 struct vhost_vring_file file = {
1139 .index = vhost_vq_index
1141 struct vhost_vring_state state = {
1142 .index = vhost_vq_index
1144 struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
1146 a = virtio_queue_get_desc_addr(vdev, idx);
1148 /* Queue might not be ready for start */
1152 vq->num = state.num = virtio_queue_get_num(vdev, idx);
1153 r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1155 VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
1159 state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1160 r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1162 VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
1166 if (vhost_needs_vring_endian(vdev)) {
1167 r = vhost_virtqueue_set_vring_endian_legacy(dev,
1168 virtio_is_big_endian(vdev),
1175 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1177 vq->desc = vhost_memory_map(dev, a, &l, false);
1178 if (!vq->desc || l != s) {
1180 goto fail_alloc_desc;
1182 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1183 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1184 vq->avail = vhost_memory_map(dev, a, &l, false);
1185 if (!vq->avail || l != s) {
1187 goto fail_alloc_avail;
1189 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1190 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1191 vq->used = vhost_memory_map(dev, a, &l, true);
1192 if (!vq->used || l != s) {
1194 goto fail_alloc_used;
1197 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1202 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1203 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1205 VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
1209 /* Clear and discard previous events if any. */
1210 event_notifier_test_and_clear(&vq->masked_notifier);
1212 /* Init vring in unmasked state, unless guest_notifier_mask
 * is used. */
1215 if (!vdev->use_guest_notifier_mask) {
1216 /* TODO: check and handle errors. */
1217 vhost_virtqueue_mask(dev, vdev, idx, false);
1220 if (k->query_guest_notifiers &&
1221 k->query_guest_notifiers(qbus->parent) &&
1222 virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1224 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1235 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1238 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1241 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1247 void vhost_virtqueue_stop(struct vhost_dev *dev,
1248 struct VirtIODevice *vdev,
1249 struct vhost_virtqueue *vq,
1252 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1253 struct vhost_vring_state state = {
1254 .index = vhost_vq_index,
1258 if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
1259 /* Don't stop a virtqueue that might not have been started */
1263 r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1265 VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
1266 /* Connection to the backend is broken, so let's sync internal
1267 * last avail idx to the device used idx.
1269 virtio_queue_restore_last_avail_idx(vdev, idx);
1271 virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1273 virtio_queue_invalidate_signalled_used(vdev, idx);
1274 virtio_queue_update_used_idx(vdev, idx);
1276 /* In the cross-endian case, we need to reset the vring endianness back
1277 * to native, as that is what legacy devices expect by default.
1279 if (vhost_needs_vring_endian(vdev)) {
1280 vhost_virtqueue_set_vring_endian_legacy(dev,
1281 !virtio_is_big_endian(vdev),
1285 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1286 1, virtio_queue_get_used_size(vdev, idx));
1287 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1288 0, virtio_queue_get_avail_size(vdev, idx));
1289 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1290 0, virtio_queue_get_desc_size(vdev, idx));
1293 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1294 int n, uint32_t timeout)
1296 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1297 struct vhost_vring_state state = {
1298 .index = vhost_vq_index,
1303 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1307 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1309 VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
1316 static void vhost_virtqueue_error_notifier(EventNotifier *n)
1318 struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
1320 struct vhost_dev *dev = vq->dev;
1321 int index = vq - dev->vqs;
1323 if (event_notifier_test_and_clear(n) && dev->vdev) {
1324 VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
1325 dev->vq_index + index);
1329 static int vhost_virtqueue_init(struct vhost_dev *dev,
1330 struct vhost_virtqueue *vq, int n)
1332 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1333 struct vhost_vring_file file = {
1334 .index = vhost_vq_index,
1336 int r = event_notifier_init(&vq->masked_notifier, 0);
1341 file.fd = event_notifier_get_wfd(&vq->masked_notifier);
1342 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1344 VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
1350 if (dev->vhost_ops->vhost_set_vring_err) {
1351 r = event_notifier_init(&vq->error_notifier, 0);
1356 file.fd = event_notifier_get_fd(&vq->error_notifier);
1357 r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
1359 VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
1363 event_notifier_set_handler(&vq->error_notifier,
1364 vhost_virtqueue_error_notifier);
1370 event_notifier_cleanup(&vq->error_notifier);
1372 event_notifier_cleanup(&vq->masked_notifier);
1376 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1378 event_notifier_cleanup(&vq->masked_notifier);
1379 if (vq->dev->vhost_ops->vhost_set_vring_err) {
1380 event_notifier_set_handler(&vq->error_notifier, NULL);
1381 event_notifier_cleanup(&vq->error_notifier);
1385 int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1386 VhostBackendType backend_type, uint32_t busyloop_timeout,
1390 int i, r, n_initialized_vqs = 0;
1393 hdev->migration_blocker = NULL;
1395 r = vhost_set_backend_type(hdev, backend_type);
1398 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp);
1403 r = hdev->vhost_ops->vhost_set_owner(hdev);
1405 error_setg_errno(errp, -r, "vhost_set_owner failed");
1409 r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1411 error_setg_errno(errp, -r, "vhost_get_features failed");
1415 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1416 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1418 error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i);
1423 if (busyloop_timeout) {
1424 for (i = 0; i < hdev->nvqs; ++i) {
1425 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1428 error_setg_errno(errp, -r, "Failed to set busyloop timeout");
1434 hdev->features = features;
1436 hdev->memory_listener = (MemoryListener) {
1438 .begin = vhost_begin,
1439 .commit = vhost_commit,
1440 .region_add = vhost_region_addnop,
1441 .region_nop = vhost_region_addnop,
1442 .log_start = vhost_log_start,
1443 .log_stop = vhost_log_stop,
1444 .log_sync = vhost_log_sync,
1445 .log_global_start = vhost_log_global_start,
1446 .log_global_stop = vhost_log_global_stop,
1450 hdev->iommu_listener = (MemoryListener) {
1451 .name = "vhost-iommu",
1452 .region_add = vhost_iommu_region_add,
1453 .region_del = vhost_iommu_region_del,
1456 if (hdev->migration_blocker == NULL) {
1457 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1458 error_setg(&hdev->migration_blocker,
1459 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1460 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1461 error_setg(&hdev->migration_blocker,
1462 "Migration disabled: failed to allocate shared memory");
1466 if (hdev->migration_blocker != NULL) {
1467 r = migrate_add_blocker(hdev->migration_blocker, errp);
1469 error_free(hdev->migration_blocker);
1474 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1475 hdev->n_mem_sections = 0;
1476 hdev->mem_sections = NULL;
1479 hdev->log_enabled = false;
1480 hdev->started = false;
1481 memory_listener_register(&hdev->memory_listener, &address_space_memory);
1482 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1484 if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1485 error_setg(errp, "vhost backend memory slots limit is less"
1486 " than current number of present memory slots");
1494 if (busyloop_timeout) {
1496 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1500 hdev->nvqs = n_initialized_vqs;
1501 vhost_dev_cleanup(hdev);
1505 void vhost_dev_cleanup(struct vhost_dev *hdev)
1509 trace_vhost_dev_cleanup(hdev);
1511 for (i = 0; i < hdev->nvqs; ++i) {
1512 vhost_virtqueue_cleanup(hdev->vqs + i);
1515 /* those are only safe after successful init */
1516 memory_listener_unregister(&hdev->memory_listener);
1517 QLIST_REMOVE(hdev, entry);
1519 if (hdev->migration_blocker) {
1520 migrate_del_blocker(hdev->migration_blocker);
1521 error_free(hdev->migration_blocker);
1524 g_free(hdev->mem_sections);
1525 if (hdev->vhost_ops) {
1526 hdev->vhost_ops->vhost_backend_cleanup(hdev);
1530 memset(hdev, 0, sizeof(struct vhost_dev));
1533 static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
1537 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1541 * Batch all the host notifiers in a single transaction to avoid
1542 * quadratic time complexity in address_space_update_ioeventfds().
1544 memory_region_transaction_begin();
1546 for (i = 0; i < nvqs; ++i) {
1547 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1550 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1556 * The transaction expects the ioeventfds to be open when it
1557 * commits. Do it now, before the cleanup loop.
1559 memory_region_transaction_commit();
1561 for (i = 0; i < nvqs; ++i) {
1562 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1564 virtio_device_release_ioeventfd(vdev);
1567 /* Stop processing guest IO notifications in qemu.
1568 * Start processing them in vhost in the kernel.
1570 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1572 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1575 /* We will pass the notifiers to the kernel, make sure that QEMU
1576 * doesn't interfere.
1578 r = virtio_device_grab_ioeventfd(vdev);
1580 error_report("binding does not support host notifiers");
1585 * Batch all the host notifiers in a single transaction to avoid
1586 * quadratic time complexity in address_space_update_ioeventfds().
1588 memory_region_transaction_begin();
1590 for (i = 0; i < hdev->nvqs; ++i) {
1591 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1594 error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1595 memory_region_transaction_commit();
1596 vhost_dev_disable_notifiers_nvqs(hdev, vdev, i);
1601 memory_region_transaction_commit();
1606 /* Stop processing guest IO notifications in vhost.
1607 * Start processing them in qemu.
1608 * This might actually run the qemu handlers right away,
1609 * so virtio in qemu must be completely set up when this is called.
1611 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1613 vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs);
1616 /* Test and clear event pending status.
1617 * Should be called after unmask to avoid losing events.
1619 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1621 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1622 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1623 return event_notifier_test_and_clear(&vq->masked_notifier);
1626 /* Mask/unmask events from this vq. */
1627 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1630 struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1631 int r, index = n - hdev->vq_index;
1632 struct vhost_vring_file file;
1634 /* should only be called after backend is connected */
1635 assert(hdev->vhost_ops);
1638 assert(vdev->use_guest_notifier_mask);
1639 file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
1641 file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq));
1644 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1645 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1647 error_report("vhost_set_vring_call failed %d", -r);
1651 bool vhost_config_pending(struct vhost_dev *hdev)
1653 assert(hdev->vhost_ops);
1654 if ((hdev->started == false) ||
1655 (hdev->vhost_ops->vhost_set_config_call == NULL)) {
1659 EventNotifier *notifier =
1660 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
1661 return event_notifier_test_and_clear(notifier);
1664 void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask)
1668 EventNotifier *notifier =
1669 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
1670 EventNotifier *config_notifier = &vdev->config_notifier;
1671 assert(hdev->vhost_ops);
1673 if ((hdev->started == false) ||
1674 (hdev->vhost_ops->vhost_set_config_call == NULL)) {
1678 assert(vdev->use_guest_notifier_mask);
1679 fd = event_notifier_get_fd(notifier);
1681 fd = event_notifier_get_fd(config_notifier);
1683 r = hdev->vhost_ops->vhost_set_config_call(hdev, fd);
1685 error_report("vhost_set_config_call failed %d", -r);
1689 static void vhost_stop_config_intr(struct vhost_dev *dev)
1692 assert(dev->vhost_ops);
1693 if (dev->vhost_ops->vhost_set_config_call) {
1694 dev->vhost_ops->vhost_set_config_call(dev, fd);
1698 static void vhost_start_config_intr(struct vhost_dev *dev)
1702 assert(dev->vhost_ops);
1703 int fd = event_notifier_get_fd(&dev->vdev->config_notifier);
1704 if (dev->vhost_ops->vhost_set_config_call) {
1705 r = dev->vhost_ops->vhost_set_config_call(dev, fd);
1707 event_notifier_set(&dev->vdev->config_notifier);
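/*
 * Mask the proposed feature bits down to those the backend supports; the
 * feature_bits list is terminated by VHOST_INVALID_FEATURE_BIT.
 */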
1712 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1715 const int *bit = feature_bits;
1716 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1717 uint64_t bit_mask = (1ULL << *bit);
1718 if (!(hdev->features & bit_mask)) {
1719 features &= ~bit_mask;
1726 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1729 const int *bit = feature_bits;
1730 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1731 uint64_t bit_mask = (1ULL << *bit);
1732 if (features & bit_mask) {
1733 hdev->acked_features |= bit_mask;
1739 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1740 uint32_t config_len, Error **errp)
1742 assert(hdev->vhost_ops);
1744 if (hdev->vhost_ops->vhost_get_config) {
1745 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len,
1749 error_setg(errp, "vhost_get_config not implemented");
1753 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1754 uint32_t offset, uint32_t size, uint32_t flags)
1756 assert(hdev->vhost_ops);
1758 if (hdev->vhost_ops->vhost_set_config) {
1759 return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1766 void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1767 const VhostDevConfigOps *ops)
1769 hdev->config_ops = ops;
1772 void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1774 if (inflight && inflight->addr) {
1775 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1776 inflight->addr = NULL;
1781 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1786 void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1787 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1791 error_report_err(err);
1795 vhost_dev_free_inflight(inflight);
1796 inflight->offset = 0;
1797 inflight->addr = addr;
1799 inflight->size = new_size;
1804 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1806 if (inflight->addr) {
1807 qemu_put_be64(f, inflight->size);
1808 qemu_put_be16(f, inflight->queue_size);
1809 qemu_put_buffer(f, inflight->addr, inflight->size);
1811 qemu_put_be64(f, 0);
1815 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1819 size = qemu_get_be64(f);
1824 if (inflight->size != size) {
1825 int ret = vhost_dev_resize_inflight(inflight, size);
1830 inflight->queue_size = qemu_get_be16(f);
1832 qemu_get_buffer(f, inflight->addr, size);
1837 int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
1841 if (hdev->vhost_ops->vhost_get_inflight_fd == NULL ||
1842 hdev->vhost_ops->vhost_set_inflight_fd == NULL) {
1848 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1850 VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed");
1857 int vhost_dev_set_inflight(struct vhost_dev *dev,
1858 struct vhost_inflight *inflight)
1862 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1863 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1865 VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed");
1873 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1874 struct vhost_inflight *inflight)
1878 if (dev->vhost_ops->vhost_get_inflight_fd) {
1879 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1881 VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed");
1889 static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
1891 if (!hdev->vhost_ops->vhost_set_vring_enable) {
1896 * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
1897 * been negotiated, the rings start directly in the enabled state, and
1898 * the .vhost_set_vring_enable callback will fail since
1899 * VHOST_USER_SET_VRING_ENABLE is not supported.
1901 if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
1902 !virtio_has_feature(hdev->backend_features,
1903 VHOST_USER_F_PROTOCOL_FEATURES)) {
1907 return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
1910 /* Host notifiers must be enabled at this point. */
1911 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
1915 /* should only be called after backend is connected */
1916 assert(hdev->vhost_ops);
1918 trace_vhost_dev_start(hdev, vdev->name, vrings);
1920 vdev->vhost_started = true;
1921 hdev->started = true;
1924 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1929 if (vhost_dev_has_iommu(hdev)) {
1930 memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1933 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1935 VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
1938 for (i = 0; i < hdev->nvqs; ++i) {
1939 r = vhost_virtqueue_start(hdev,
1942 hdev->vq_index + i);
1948 r = event_notifier_init(
1949 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
1951 VHOST_OPS_DEBUG(r, "event_notifier_init failed");
1954 event_notifier_test_and_clear(
1955 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
1956 if (!vdev->use_guest_notifier_mask) {
1957 vhost_config_mask(hdev, vdev, true);
1959 if (hdev->log_enabled) {
1962 hdev->log_size = vhost_get_log_size(hdev);
1963 hdev->log = vhost_log_get(hdev->log_size,
1964 vhost_dev_log_is_shared(hdev));
1965 log_base = (uintptr_t)hdev->log->log;
1966 r = hdev->vhost_ops->vhost_set_log_base(hdev,
1967 hdev->log_size ? log_base : 0,
1970 VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
1975 r = vhost_dev_set_vring_enable(hdev, true);
1980 if (hdev->vhost_ops->vhost_dev_start) {
1981 r = hdev->vhost_ops->vhost_dev_start(hdev, true);
1986 if (vhost_dev_has_iommu(hdev) &&
1987 hdev->vhost_ops->vhost_set_iotlb_callback) {
1988 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1990 /* Update used ring information for IOTLB to work correctly;
1991 * the vhost-kernel code requires this. */
1992 for (i = 0; i < hdev->nvqs; ++i) {
1993 struct vhost_virtqueue *vq = hdev->vqs + i;
1994 vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1997 vhost_start_config_intr(hdev);
2001 vhost_dev_set_vring_enable(hdev, false);
2004 vhost_log_put(hdev, false);
2007 vhost_virtqueue_stop(hdev,
2010 hdev->vq_index + i);
2014 if (vhost_dev_has_iommu(hdev)) {
2015 memory_listener_unregister(&hdev->iommu_listener);
2018 vdev->vhost_started = false;
2019 hdev->started = false;
2023 /* Host notifiers must be enabled at this point. */
2024 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
2028 /* should only be called after backend is connected */
2029 assert(hdev->vhost_ops);
2030 event_notifier_test_and_clear(
2031 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
2032 event_notifier_test_and_clear(&vdev->config_notifier);
2034 trace_vhost_dev_stop(hdev, vdev->name, vrings);
2036 if (hdev->vhost_ops->vhost_dev_start) {
2037 hdev->vhost_ops->vhost_dev_start(hdev, false);
2040 vhost_dev_set_vring_enable(hdev, false);
2042 for (i = 0; i < hdev->nvqs; ++i) {
2043 vhost_virtqueue_stop(hdev,
2046 hdev->vq_index + i);
2048 if (hdev->vhost_ops->vhost_reset_status) {
2049 hdev->vhost_ops->vhost_reset_status(hdev);
2052 if (vhost_dev_has_iommu(hdev)) {
2053 if (hdev->vhost_ops->vhost_set_iotlb_callback) {
2054 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
2056 memory_listener_unregister(&hdev->iommu_listener);
2058 vhost_stop_config_intr(hdev);
2059 vhost_log_put(hdev, true);
2060 hdev->started = false;
2061 vdev->vhost_started = false;
2065 int vhost_net_set_backend(struct vhost_dev *hdev,
2066 struct vhost_vring_file *file)
2068 if (hdev->vhost_ops->vhost_net_set_backend) {
2069 return hdev->vhost_ops->vhost_net_set_backend(hdev, file);