// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>

#include "io_pagetable.h"
#include "iommufd_private.h"

static bool allow_unsafe_interrupts;
module_param(allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(
        allow_unsafe_interrupts,
        "Allow IOMMUFD to bind to devices even if the platform cannot isolate "
        "the MSI interrupt window. Enabling this is a security weakness.");

void iommufd_device_destroy(struct iommufd_object *obj)
{
        struct iommufd_device *idev =
                container_of(obj, struct iommufd_device, obj);

        iommu_device_release_dma_owner(idev->dev);
        iommu_group_put(idev->group);
        if (!iommufd_selftest_is_mock_dev(idev->dev))
                iommufd_ctx_put(idev->ictx);
}

/**
 * iommufd_device_bind - Bind a physical device to an iommu fd
 * @ictx: iommufd file descriptor
 * @dev: Pointer to a physical device struct
 * @id: Output ID number to return to userspace for this device
 *
 * A successful bind establishes ownership over the device and returns a
 * struct iommufd_device pointer, otherwise it returns an error pointer.
 *
 * A driver using this API must set driver_managed_dma and must not touch
 * the device until this routine succeeds and establishes ownership.
 *
 * Binding a PCI device places the entire RID under iommufd control.
 *
 * The caller must undo this with iommufd_device_unbind().
 */
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
                                           struct device *dev, u32 *id)
{
        struct iommufd_device *idev;
        struct iommu_group *group;
        int rc;

        /*
         * iommufd always sets IOMMU_CACHE because we offer no way for userspace
         * to restore cache coherency.
         */
        if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
                return ERR_PTR(-EINVAL);

        group = iommu_group_get(dev);
        if (!group)
                return ERR_PTR(-ENODEV);

        rc = iommu_device_claim_dma_owner(dev, ictx);
        if (rc)
                goto out_group_put;

        idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
        if (IS_ERR(idev)) {
                rc = PTR_ERR(idev);
                goto out_release_owner;
        }
        idev->ictx = ictx;
        if (!iommufd_selftest_is_mock_dev(dev))
                iommufd_ctx_get(ictx);
        idev->dev = dev;
        idev->enforce_cache_coherency =
                device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
        /* The calling driver is a user until iommufd_device_unbind() */
        refcount_inc(&idev->obj.users);
        /* group refcount moves into iommufd_device */
        idev->group = group;

        /*
         * If the caller fails after this success it must call
         * iommufd_device_unbind() which is safe since we hold this refcount.
         * This also means the device is a leaf in the graph and no other object
         * can take a reference on it.
         */
        iommufd_object_finalize(ictx, &idev->obj);
        *id = idev->obj.id;
        return idev;

out_release_owner:
        iommu_device_release_dma_owner(dev);
out_group_put:
        iommu_group_put(group);
        return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);
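
/*
 * Usage sketch (illustrative only, not part of this driver): how a consumer
 * such as a VFIO-like driver might use the bind/unbind pair above. The
 * function names and error handling are hypothetical; what is required is
 * that the driver sets .driver_managed_dma in its struct device_driver
 * before calling iommufd_device_bind().
 *
 *	static int example_bind(struct iommufd_ctx *ictx, struct device *dev,
 *				struct iommufd_device **out_idev, u32 *dev_id)
 *	{
 *		struct iommufd_device *idev;
 *
 *		idev = iommufd_device_bind(ictx, dev, dev_id);
 *		if (IS_ERR(idev))
 *			return PTR_ERR(idev);
 *		*out_idev = idev;
 *		return 0;
 *	}
 *
 *	static void example_unbind(struct iommufd_device *idev)
 *	{
 *		iommufd_device_unbind(idev);
 *	}
 */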

/**
 * iommufd_ctx_has_group - True if any device within the group is bound
 *                         to the ictx
 * @ictx: iommufd file descriptor
 * @group: Pointer to a physical iommu_group struct
 *
 * True if any device within the group has been bound to this ictx, e.g. via
 * iommufd_device_bind(), therefore implying ictx ownership of the group.
 */
bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group)
{
        struct iommufd_object *obj;
        unsigned long index;

        if (!ictx || !group)
                return false;

        xa_lock(&ictx->objects);
        xa_for_each(&ictx->objects, index, obj) {
                if (obj->type == IOMMUFD_OBJ_DEVICE &&
                    container_of(obj, struct iommufd_device, obj)->group == group) {
                        xa_unlock(&ictx->objects);
                        return true;
                }
        }
        xa_unlock(&ictx->objects);
        return false;
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_has_group, IOMMUFD);

/**
 * iommufd_device_unbind - Undo iommufd_device_bind()
 * @idev: Device returned by iommufd_device_bind()
 *
 * Release the device from iommufd control. The DMA ownership will return back
 * to unowned with DMA controlled by the DMA API. This invalidates the
 * iommufd_device pointer, other APIs that consume it must not be called
 * concurrently.
 */
void iommufd_device_unbind(struct iommufd_device *idev)
{
        iommufd_object_destroy_user(idev->ictx, &idev->obj);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);

struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev)
{
        return idev->ictx;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_to_ictx, IOMMUFD);

u32 iommufd_device_to_id(struct iommufd_device *idev)
{
        return idev->obj.id;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD);

static int iommufd_device_setup_msi(struct iommufd_device *idev,
                                    struct iommufd_hw_pagetable *hwpt,
                                    phys_addr_t sw_msi_start)
{
        int rc;

        /*
         * If the IOMMU driver gives an IOMMU_RESV_SW_MSI then it is asking us
         * to call iommu_get_msi_cookie() on its behalf. This is necessary to
         * setup the MSI window so iommu_dma_prepare_msi() can install pages
         * into our domain after request_irq(). If this is not done interrupts
         * will not work on this domain.
         *
         * FIXME: This is conceptually broken for iommufd since we want to allow
         * userspace to change the domains, eg switch from an identity IOAS to a
         * DMA IOAS. There is currently no way to create a MSI window that
         * matches what the IRQ layer actually expects in a newly created
         * domain.
         */
        if (sw_msi_start != PHYS_ADDR_MAX && !hwpt->msi_cookie) {
                rc = iommu_get_msi_cookie(hwpt->domain, sw_msi_start);
                if (rc)
                        return rc;

                /*
                 * iommu_get_msi_cookie() can only be called once per domain,
                 * it returns -EBUSY on later calls.
                 */
                hwpt->msi_cookie = true;
        }

        /*
         * For historical compat with VFIO the insecure interrupt path is
         * allowed if the module parameter is set. Insecure means that a MemWr
         * operation from the device (eg a simple DMA) can trigger an
         * interrupt outside this iommufd context.
         */
        if (!iommufd_selftest_is_mock_dev(idev->dev) &&
            !iommu_group_has_isolated_msi(idev->group)) {
                if (!allow_unsafe_interrupts)
                        return -EPERM;

                dev_warn(
                        idev->dev,
                        "MSI interrupts are not secure, they cannot be isolated by the platform. "
                        "Check that platform features like interrupt remapping are enabled. "
                        "Use the \"allow_unsafe_interrupts\" module parameter to override\n");
        }
        return 0;
}

static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt,
                                           struct iommu_group *group)
{
        struct iommufd_device *cur_dev;

        lockdep_assert_held(&hwpt->devices_lock);

        list_for_each_entry(cur_dev, &hwpt->devices, devices_item)
                if (cur_dev->group == group)
                        return true;
        return false;
}

int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
                                struct iommufd_device *idev)
{
        phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
        int rc;

        lockdep_assert_held(&hwpt->devices_lock);

        if (WARN_ON(idev->hwpt))
                return -EINVAL;

        /*
         * Try to upgrade the domain we have, it is an iommu driver bug to
         * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail
         * enforce_cache_coherency when there are no devices attached to the
         * domain.
         */
        if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) {
                if (hwpt->domain->ops->enforce_cache_coherency)
                        hwpt->enforce_cache_coherency =
                                hwpt->domain->ops->enforce_cache_coherency(
                                        hwpt->domain);
                if (!hwpt->enforce_cache_coherency) {
                        WARN_ON(list_empty(&hwpt->devices));
                        return -EINVAL;
                }
        }

        rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
                                                   idev->group, &sw_msi_start);
        if (rc)
                return rc;

        rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start);
        if (rc)
                goto err_unresv;

        /*
         * FIXME: Hack around missing a device-centric iommu api, only attach to
         * the group once for the first device that is in the group.
         */
        if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
                rc = iommu_attach_group(hwpt->domain, idev->group);
                if (rc)
                        goto err_unresv;
        }
        return 0;
err_unresv:
        iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
        return rc;
}

void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
                                 struct iommufd_device *idev)
{
        if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
                iommu_detach_group(hwpt->domain, idev->group);
        iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
}

static int iommufd_device_do_attach(struct iommufd_device *idev,
                                    struct iommufd_hw_pagetable *hwpt)
{
        int rc;

        mutex_lock(&hwpt->devices_lock);
        rc = iommufd_hw_pagetable_attach(hwpt, idev);
        if (rc)
                goto out_unlock;

        idev->hwpt = hwpt;
        refcount_inc(&hwpt->obj.users);
        list_add(&idev->devices_item, &hwpt->devices);
out_unlock:
        mutex_unlock(&hwpt->devices_lock);
        return rc;
}

/*
 * When automatically managing the domains we search for a compatible domain in
 * the iopt and if one is found use it, otherwise create a new domain.
 * Automatic domain selection will never pick a manually created domain.
 */
static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
                                          struct iommufd_ioas *ioas)
{
        struct iommufd_hw_pagetable *hwpt;
        int rc;

        /*
         * There is no differentiation when domains are allocated, so any domain
         * that is willing to attach to the device is interchangeable with any
         * other.
         */
        mutex_lock(&ioas->mutex);
        list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) {
                if (!hwpt->auto_domain)
                        continue;

                if (!iommufd_lock_obj(&hwpt->obj))
                        continue;
                rc = iommufd_device_do_attach(idev, hwpt);
                iommufd_put_object(&hwpt->obj);

                /*
                 * -EINVAL means the domain is incompatible with the device.
                 * Other error codes should propagate to userspace as failure.
                 * Success means the domain is attached.
                 */
                if (rc == -EINVAL)
                        continue;
                goto out_unlock;
        }

        hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
        if (IS_ERR(hwpt)) {
                rc = PTR_ERR(hwpt);
                goto out_unlock;
        }
        hwpt->auto_domain = true;

        mutex_unlock(&ioas->mutex);
        iommufd_object_finalize(idev->ictx, &hwpt->obj);
        return 0;
out_unlock:
        mutex_unlock(&ioas->mutex);
        return rc;
}

/**
 * iommufd_device_attach - Connect a device to an iommu_domain
 * @idev: device to attach
 * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
 *         Output the IOMMUFD_OBJ_HW_PAGETABLE ID
 *
 * This connects the device to an iommu_domain, either automatically or manually
 * selected. Once this completes the device can do DMA.
 *
 * The caller should return the resulting pt_id back to userspace.
 * This function is undone by calling iommufd_device_detach().
 */
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
{
        struct iommufd_object *pt_obj;
        int rc;

        pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
        if (IS_ERR(pt_obj))
                return PTR_ERR(pt_obj);

        switch (pt_obj->type) {
        case IOMMUFD_OBJ_HW_PAGETABLE: {
                struct iommufd_hw_pagetable *hwpt =
                        container_of(pt_obj, struct iommufd_hw_pagetable, obj);

                rc = iommufd_device_do_attach(idev, hwpt);
                if (rc)
                        goto out_put_pt_obj;
                break;
        }
        case IOMMUFD_OBJ_IOAS: {
                struct iommufd_ioas *ioas =
                        container_of(pt_obj, struct iommufd_ioas, obj);

                rc = iommufd_device_auto_get_domain(idev, ioas);
                if (rc)
                        goto out_put_pt_obj;
                break;
        }
        default:
                rc = -EINVAL;
                goto out_put_pt_obj;
        }

        /* This refcount is released by iommufd_device_detach() */
        refcount_inc(&idev->obj.users);
        *pt_id = idev->hwpt->obj.id;
        rc = 0;

out_put_pt_obj:
        iommufd_put_object(pt_obj);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
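
/*
 * Usage sketch (illustrative, hypothetical driver code): attaching a bound
 * device to an IOAS id received from userspace. Passing an IOAS id lets the
 * auto-domain path above pick or create a compatible hw_pagetable; the
 * resulting IOMMUFD_OBJ_HW_PAGETABLE id is written back into pt_id.
 *
 *	static int example_attach_ioas(struct iommufd_device *idev, u32 ioas_id)
 *	{
 *		u32 pt_id = ioas_id;
 *		int rc;
 *
 *		rc = iommufd_device_attach(idev, &pt_id);
 *		if (rc)
 *			return rc;
 *		return 0;
 *	}
 *
 * The attachment is undone with iommufd_device_detach(idev).
 */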

/**
 * iommufd_device_detach - Disconnect a device from an iommu_domain
 * @idev: device to detach
 *
 * Undo iommufd_device_attach(). This disconnects the idev from the previously
 * attached pt_id. The device returns back to a blocked DMA translation.
 */
void iommufd_device_detach(struct iommufd_device *idev)
{
        struct iommufd_hw_pagetable *hwpt = idev->hwpt;

        mutex_lock(&hwpt->devices_lock);
        list_del(&idev->devices_item);
        idev->hwpt = NULL;
        iommufd_hw_pagetable_detach(hwpt, idev);
        mutex_unlock(&hwpt->devices_lock);

        if (hwpt->auto_domain)
                iommufd_object_deref_user(idev->ictx, &hwpt->obj);
        else
                refcount_dec(&hwpt->obj.users);

        refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);

void iommufd_access_destroy_object(struct iommufd_object *obj)
{
        struct iommufd_access *access =
                container_of(obj, struct iommufd_access, obj);

        if (access->ioas) {
                iopt_remove_access(&access->ioas->iopt, access);
                refcount_dec(&access->ioas->obj.users);
                access->ioas = NULL;
        }
        iommufd_ctx_put(access->ictx);
}

/**
 * iommufd_access_create - Create an iommufd_access
 * @ictx: iommufd file descriptor
 * @ops: Driver's ops to associate with the access
 * @data: Opaque data to pass into ops functions
 * @id: Output ID number to return to userspace for this access
 *
 * An iommufd_access allows a driver to read/write to the IOAS without using
 * DMA. The underlying CPU memory can be accessed using the
 * iommufd_access_pin_pages() or iommufd_access_rw() functions.
 *
 * The provided ops are required to use iommufd_access_pin_pages().
 */
struct iommufd_access *
iommufd_access_create(struct iommufd_ctx *ictx,
                      const struct iommufd_access_ops *ops, void *data, u32 *id)
{
        struct iommufd_access *access;

        /*
         * There is no uAPI for the access object, but to keep things symmetric
         * use the object infrastructure anyhow.
         */
        access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
        if (IS_ERR(access))
                return access;

        access->data = data;
        access->ops = ops;

        if (ops->needs_pin_pages)
                access->iova_alignment = PAGE_SIZE;
        else
                access->iova_alignment = 1;

        /* The calling driver is a user until iommufd_access_destroy() */
        refcount_inc(&access->obj.users);
        access->ictx = ictx;
        iommufd_ctx_get(ictx);
        iommufd_object_finalize(ictx, &access->obj);
        *id = access->obj.id;
        mutex_init(&access->ioas_lock);
        return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
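
/*
 * Usage sketch (illustrative): an access user that wants to pin pages must
 * provide an unmap callback and set needs_pin_pages, so the IOAS can revoke
 * pinnings when userspace unmaps. The ops structure and callback names below
 * are hypothetical.
 *
 *	static void example_unmap(void *data, unsigned long iova,
 *				  unsigned long length)
 *	{
 *		// Unpin everything overlapping iova .. iova + length - 1
 *	}
 *
 *	static const struct iommufd_access_ops example_access_ops = {
 *		.needs_pin_pages = 1,
 *		.unmap = example_unmap,
 *	};
 *
 *	access = iommufd_access_create(ictx, &example_access_ops, priv, &id);
 */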

/**
 * iommufd_access_destroy - Destroy an iommufd_access
 * @access: The access to destroy
 *
 * The caller must stop using the access before destroying it.
 */
void iommufd_access_destroy(struct iommufd_access *access)
{
        iommufd_object_destroy_user(access->ictx, &access->obj);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);

void iommufd_access_detach(struct iommufd_access *access)
{
        struct iommufd_ioas *cur_ioas = access->ioas;

        mutex_lock(&access->ioas_lock);
        if (WARN_ON(!access->ioas))
                goto out;
        /*
         * Set ioas to NULL to block any further iommufd_access_pin_pages().
         * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
         */
        access->ioas = NULL;

        if (access->ops->unmap) {
                mutex_unlock(&access->ioas_lock);
                access->ops->unmap(access->data, 0, ULONG_MAX);
                mutex_lock(&access->ioas_lock);
        }
        iopt_remove_access(&cur_ioas->iopt, access);
        refcount_dec(&cur_ioas->obj.users);
out:
        access->ioas_unpin = NULL;
        mutex_unlock(&access->ioas_lock);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_detach, IOMMUFD);

int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
        struct iommufd_ioas *new_ioas;
        int rc;

        mutex_lock(&access->ioas_lock);
        if (WARN_ON(access->ioas || access->ioas_unpin)) {
                mutex_unlock(&access->ioas_lock);
                return -EINVAL;
        }

        new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
        if (IS_ERR(new_ioas)) {
                mutex_unlock(&access->ioas_lock);
                return PTR_ERR(new_ioas);
        }

        rc = iopt_add_access(&new_ioas->iopt, access);
        if (rc) {
                mutex_unlock(&access->ioas_lock);
                iommufd_put_object(&new_ioas->obj);
                return rc;
        }
        iommufd_ref_to_users(&new_ioas->obj);

        access->ioas = new_ioas;
        access->ioas_unpin = new_ioas;
        mutex_unlock(&access->ioas_lock);
        return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
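
/*
 * Usage sketch (illustrative): binding the access to an IOAS chosen by
 * userspace, and detaching it again before teardown. ioas_id comes from
 * userspace in this hypothetical flow.
 *
 *	rc = iommufd_access_attach(access, ioas_id);
 *	if (rc)
 *		return rc;
 *	...
 *	iommufd_access_detach(access);
 *	iommufd_access_destroy(access);
 */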

/**
 * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
 * @iopt: iopt to work on
 * @iova: Starting iova in the iopt
 * @length: Number of bytes
 *
 * After this function returns there should be no users attached to the pages
 * linked to this iopt that intersect with iova,length. Anyone that has attached
 * a user through iopt_access_pages() needs to detach it through
 * iommufd_access_unpin_pages() before this function returns.
 *
 * iommufd_access_destroy() will wait for any outstanding unmap callback to
 * complete. Once iommufd_access_destroy() returns no unmap ops are running or
 * will run in the future. Due to this a driver must not create locking that
 * prevents unmap from completing while iommufd_access_destroy() is running.
 */
void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
                                 unsigned long length)
{
        struct iommufd_ioas *ioas =
                container_of(iopt, struct iommufd_ioas, iopt);
        struct iommufd_access *access;
        unsigned long index;

        xa_lock(&ioas->iopt.access_list);
        xa_for_each(&ioas->iopt.access_list, index, access) {
                if (!iommufd_lock_obj(&access->obj))
                        continue;
                xa_unlock(&ioas->iopt.access_list);

                access->ops->unmap(access->data, iova, length);

                iommufd_put_object(&access->obj);
                xa_lock(&ioas->iopt.access_list);
        }
        xa_unlock(&ioas->iopt.access_list);
}

/**
 * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 *
 * Return the pinned struct pages back to the system. The caller must stop
 * accessing them before calling this. The iova/length must exactly match the
 * ones provided to iommufd_access_pin_pages().
 */
void iommufd_access_unpin_pages(struct iommufd_access *access,
                                unsigned long iova, unsigned long length)
{
        struct iopt_area_contig_iter iter;
        struct io_pagetable *iopt;
        unsigned long last_iova;
        struct iopt_area *area;

        if (WARN_ON(!length) ||
            WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
                return;

        mutex_lock(&access->ioas_lock);
        /*
         * The driver must be doing something wrong if it calls this before an
         * iommufd_access_attach() or after an iommufd_access_detach().
         */
        if (WARN_ON(!access->ioas_unpin)) {
                mutex_unlock(&access->ioas_lock);
                return;
        }
        iopt = &access->ioas_unpin->iopt;

        down_read(&iopt->iova_rwsem);
        iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
                iopt_area_remove_access(
                        area, iopt_area_iova_to_index(area, iter.cur_iova),
                        iopt_area_iova_to_index(
                                area,
                                min(last_iova, iopt_area_last_iova(area))));
        WARN_ON(!iopt_area_contig_done(&iter));
        up_read(&iopt->iova_rwsem);
        mutex_unlock(&access->ioas_lock);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);

static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
{
        if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
                return false;

        if (!iopt_area_contig_done(iter) &&
            (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
             PAGE_SIZE) != (PAGE_SIZE - 1))
                return false;
        return true;
}

static bool check_area_prot(struct iopt_area *area, unsigned int flags)
{
        if (flags & IOMMUFD_ACCESS_RW_WRITE)
                return area->iommu_prot & IOMMU_WRITE;
        return area->iommu_prot & IOMMU_READ;
}

/**
 * iommufd_access_pin_pages() - Return a list of pages under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 * @out_pages: Output page list
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Reads @length bytes starting at iova and returns the struct page * pointers.
 * These can be kmap'd by the caller for CPU access.
 *
 * The caller must perform iommufd_access_unpin_pages() when done to balance
 * this.
 *
 * This API always requires a page aligned iova. This happens naturally if the
 * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
 * smaller alignments have corner cases where this API can fail on otherwise
 * aligned iova.
 */
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
                             unsigned long length, struct page **out_pages,
                             unsigned int flags)
{
        struct iopt_area_contig_iter iter;
        struct io_pagetable *iopt;
        unsigned long last_iova;
        struct iopt_area *area;
        int rc;

        /* Driver's ops don't support pin_pages */
        if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
            WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
                return -EINVAL;

        if (!length)
                return -EINVAL;
        if (check_add_overflow(iova, length - 1, &last_iova))
                return -EOVERFLOW;

        mutex_lock(&access->ioas_lock);
        if (!access->ioas) {
                mutex_unlock(&access->ioas_lock);
                return -ENOENT;
        }
        iopt = &access->ioas->iopt;

        down_read(&iopt->iova_rwsem);
        iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
                unsigned long last = min(last_iova, iopt_area_last_iova(area));
                unsigned long last_index = iopt_area_iova_to_index(area, last);
                unsigned long index =
                        iopt_area_iova_to_index(area, iter.cur_iova);

                if (area->prevent_access ||
                    !iopt_area_contig_is_aligned(&iter)) {
                        rc = -EINVAL;
                        goto err_remove;
                }

                if (!check_area_prot(area, flags)) {
                        rc = -EPERM;
                        goto err_remove;
                }

                rc = iopt_area_add_access(area, index, last_index, out_pages,
                                          flags);
                if (rc)
                        goto err_remove;
                out_pages += last_index - index + 1;
        }
        if (!iopt_area_contig_done(&iter)) {
                rc = -ENOENT;
                goto err_remove;
        }

        up_read(&iopt->iova_rwsem);
        mutex_unlock(&access->ioas_lock);
        return 0;

err_remove:
        if (iova < iter.cur_iova) {
                last_iova = iter.cur_iova - 1;
                iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
                        iopt_area_remove_access(
                                area,
                                iopt_area_iova_to_index(area, iter.cur_iova),
                                iopt_area_iova_to_index(
                                        area, min(last_iova,
                                                  iopt_area_last_iova(area))));
        }
        up_read(&iopt->iova_rwsem);
        mutex_unlock(&access->ioas_lock);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
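
/*
 * Usage sketch (illustrative): pinning one page worth of IOVA for CPU access
 * and releasing it afterwards. The iova/length passed to
 * iommufd_access_unpin_pages() must exactly match the pin call.
 *
 *	struct page *pages[1];
 *	int rc;
 *
 *	rc = iommufd_access_pin_pages(access, iova, PAGE_SIZE, pages,
 *				      IOMMUFD_ACCESS_RW_WRITE);
 *	if (rc)
 *		return rc;
 *	// map with kmap_local_page(pages[0]), do the CPU access, then unmap
 *	iommufd_access_unpin_pages(access, iova, PAGE_SIZE);
 */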

/**
 * iommufd_access_rw - Read or write data under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @data: Kernel buffer to copy to/from
 * @length: Number of bytes to access
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Copy kernel data to/from the IOVA range given by iova/length. If flags
 * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
 * by changing it into copy_to/from_user().
 */
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
                      void *data, size_t length, unsigned int flags)
{
        struct iopt_area_contig_iter iter;
        struct io_pagetable *iopt;
        struct iopt_area *area;
        unsigned long last_iova;
        int rc;

        if (!length)
                return -EINVAL;
        if (check_add_overflow(iova, length - 1, &last_iova))
                return -EOVERFLOW;

        mutex_lock(&access->ioas_lock);
        if (!access->ioas) {
                mutex_unlock(&access->ioas_lock);
                return -ENOENT;
        }
        iopt = &access->ioas->iopt;

        down_read(&iopt->iova_rwsem);
        iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
                unsigned long last = min(last_iova, iopt_area_last_iova(area));
                unsigned long bytes = (last - iter.cur_iova) + 1;

                if (area->prevent_access) {
                        rc = -EINVAL;
                        goto err_out;
                }

                if (!check_area_prot(area, flags)) {
                        rc = -EPERM;
                        goto err_out;
                }

                rc = iopt_pages_rw_access(
                        area->pages, iopt_area_start_byte(area, iter.cur_iova),
                        data, bytes, flags);
                if (rc)
                        goto err_out;
                data += bytes;
        }
        if (!iopt_area_contig_done(&iter))
                rc = -ENOENT;
err_out:
        up_read(&iopt->iova_rwsem);
        mutex_unlock(&access->ioas_lock);
        return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
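
/*
 * Usage sketch (illustrative): a simple copy out of the IOAS into a kernel
 * buffer, without pinning. A zero flags value requests a read; adding
 * IOMMUFD_ACCESS_RW_WRITE would write the buffer into the IOAS instead.
 *
 *	char buf[64];
 *	int rc;
 *
 *	rc = iommufd_access_rw(access, iova, buf, sizeof(buf), 0);
 *	if (rc)
 *		return rc;
 */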