// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
 */
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>

#include "io_pagetable.h"
#include "iommufd_private.h"

static bool allow_unsafe_interrupts;
module_param(allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(
	allow_unsafe_interrupts,
	"Allow IOMMUFD to bind to devices even if the platform cannot isolate "
	"the MSI interrupt window. Enabling this is a security weakness.");

void iommufd_device_destroy(struct iommufd_object *obj)
{
	struct iommufd_device *idev =
		container_of(obj, struct iommufd_device, obj);

	iommu_device_release_dma_owner(idev->dev);
	iommu_group_put(idev->group);
	if (!iommufd_selftest_is_mock_dev(idev->dev))
		iommufd_ctx_put(idev->ictx);
}

/**
 * iommufd_device_bind - Bind a physical device to an iommu fd
 * @ictx: iommufd file descriptor
 * @dev: Pointer to a physical device struct
 * @id: Output ID number to return to userspace for this device
 *
 * A successful bind establishes ownership over the device and returns a
 * struct iommufd_device pointer, otherwise it returns an error pointer.
 *
 * A driver using this API must set driver_managed_dma and must not touch
 * the device until this routine succeeds and establishes ownership.
 *
 * Binding a PCI device places the entire RID under iommufd control.
 *
 * The caller must undo this with iommufd_device_unbind().
 */
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
					   struct device *dev, u32 *id)
{
	struct iommufd_device *idev;
	struct iommu_group *group;
	int rc;

	/*
	 * iommufd always sets IOMMU_CACHE because we offer no way for userspace
	 * to restore cache coherency.
	 */
	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY))
		return ERR_PTR(-EINVAL);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	rc = iommu_device_claim_dma_owner(dev, ictx);
	if (rc)
		goto out_group_put;

	idev = iommufd_object_alloc(ictx, idev, IOMMUFD_OBJ_DEVICE);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_release_owner;
	}
	idev->ictx = ictx;
	if (!iommufd_selftest_is_mock_dev(dev))
		iommufd_ctx_get(ictx);
	idev->dev = dev;
	idev->enforce_cache_coherency =
		device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY);
	/* The calling driver is a user until iommufd_device_unbind() */
	refcount_inc(&idev->obj.users);
	/* group refcount moves into iommufd_device */
	idev->group = group;

	/*
	 * If the caller fails after this success it must call
	 * iommufd_device_unbind() which is safe since we hold this refcount.
	 * This also means the device is a leaf in the graph and no other object
	 * can take a reference on it.
	 */
	iommufd_object_finalize(ictx, &idev->obj);
	*id = idev->obj.id;
	return idev;

out_release_owner:
	iommu_device_release_dma_owner(dev);
out_group_put:
	iommu_group_put(group);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_bind, IOMMUFD);

/**
 * iommufd_device_unbind - Undo iommufd_device_bind()
 * @idev: Device returned by iommufd_device_bind()
 *
 * Release the device from iommufd control. The DMA ownership will return back
 * to unowned with DMA controlled by the DMA API. This invalidates the
 * iommufd_device pointer, other APIs that consume it must not be called
 * concurrently.
 */
void iommufd_device_unbind(struct iommufd_device *idev)
{
	iommufd_object_destroy_user(idev->ictx, &idev->obj);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_unbind, IOMMUFD);
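
/*
 * Usage sketch (illustrative only, not part of this file): a driver that has
 * set driver_managed_dma might bind during probe and unbind on teardown. The
 * surrounding ictx/dev variables are assumed to be held by the caller.
 *
 *	struct iommufd_device *idev;
 *	u32 dev_id;
 *
 *	idev = iommufd_device_bind(ictx, dev, &dev_id);
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	...report dev_id to userspace and use the device, then later...
 *	iommufd_device_unbind(idev);
 */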

static int iommufd_device_setup_msi(struct iommufd_device *idev,
				    struct iommufd_hw_pagetable *hwpt,
				    phys_addr_t sw_msi_start)
{
	int rc;

	/*
	 * If the IOMMU driver gives an IOMMU_RESV_SW_MSI then it is asking us
	 * to call iommu_get_msi_cookie() on its behalf. This is necessary to
	 * set up the MSI window so iommu_dma_prepare_msi() can install pages
	 * into our domain after request_irq(). If it is not done interrupts
	 * will not work on this domain.
	 *
	 * FIXME: This is conceptually broken for iommufd since we want to allow
	 * userspace to change the domains, eg switch from an identity IOAS to a
	 * DMA IOAS. There is currently no way to create a MSI window that
	 * matches what the IRQ layer actually expects in a newly created
	 * domain.
	 */
	if (sw_msi_start != PHYS_ADDR_MAX && !hwpt->msi_cookie) {
		rc = iommu_get_msi_cookie(hwpt->domain, sw_msi_start);
		if (rc)
			return rc;

		/*
		 * iommu_get_msi_cookie() can only be called once per domain,
		 * it returns -EBUSY on later calls.
		 */
		hwpt->msi_cookie = true;
	}

	/*
	 * For historical compat with VFIO the insecure interrupt path is
	 * allowed if the module parameter is set. Secure/Isolated means that a
	 * MemWr operation from the device (eg a simple DMA) cannot trigger an
	 * interrupt outside this iommufd context.
	 */
	if (!iommufd_selftest_is_mock_dev(idev->dev) &&
	    !iommu_group_has_isolated_msi(idev->group)) {
		if (!allow_unsafe_interrupts)
			return -EPERM;

		dev_warn(
			idev->dev,
			"MSI interrupts are not secure, they cannot be isolated by the platform. "
			"Check that platform features like interrupt remapping are enabled. "
			"Use the \"allow_unsafe_interrupts\" module parameter to override\n");
	}
	return 0;
}

static bool iommufd_hw_pagetable_has_group(struct iommufd_hw_pagetable *hwpt,
					   struct iommu_group *group)
{
	struct iommufd_device *cur_dev;

	lockdep_assert_held(&hwpt->devices_lock);

	list_for_each_entry(cur_dev, &hwpt->devices, devices_item)
		if (cur_dev->group == group)
			return true;
	return false;
}

int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev)
{
	phys_addr_t sw_msi_start = PHYS_ADDR_MAX;
	int rc;

	lockdep_assert_held(&hwpt->devices_lock);

	if (WARN_ON(idev->hwpt))
		return -EINVAL;

	/*
	 * Try to upgrade the domain we have, it is an iommu driver bug to
	 * report IOMMU_CAP_ENFORCE_CACHE_COHERENCY but fail
	 * enforce_cache_coherency when there are no devices attached to the
	 * domain.
	 */
	if (idev->enforce_cache_coherency && !hwpt->enforce_cache_coherency) {
		if (hwpt->domain->ops->enforce_cache_coherency)
			hwpt->enforce_cache_coherency =
				hwpt->domain->ops->enforce_cache_coherency(
					hwpt->domain);
		if (!hwpt->enforce_cache_coherency) {
			WARN_ON(list_empty(&hwpt->devices));
			return -EINVAL;
		}
	}

	rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev,
						   idev->group, &sw_msi_start);
	if (rc)
		return rc;

	rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start);
	if (rc)
		goto err_unresv;

	/*
	 * FIXME: Hack around missing a device-centric iommu api, only attach to
	 * the group once for the first device that is in the group.
	 */
	if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
		rc = iommu_attach_group(hwpt->domain, idev->group);
		if (rc)
			goto err_unresv;
	}
	return 0;
err_unresv:
	iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
	return rc;
}

void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt,
				 struct iommufd_device *idev)
{
	if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
		iommu_detach_group(hwpt->domain, idev->group);
	iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
}

static int iommufd_device_do_attach(struct iommufd_device *idev,
				    struct iommufd_hw_pagetable *hwpt)
{
	int rc;

	mutex_lock(&hwpt->devices_lock);
	rc = iommufd_hw_pagetable_attach(hwpt, idev);
	if (rc)
		goto out_unlock;

	idev->hwpt = hwpt;
	refcount_inc(&hwpt->obj.users);
	list_add(&idev->devices_item, &hwpt->devices);
out_unlock:
	mutex_unlock(&hwpt->devices_lock);
	return rc;
}

/*
 * When automatically managing the domains we search for a compatible domain in
 * the iopt and if one is found use it, otherwise create a new domain.
 * Automatic domain selection will never pick a manually created domain.
 */
static int iommufd_device_auto_get_domain(struct iommufd_device *idev,
					  struct iommufd_ioas *ioas)
{
	struct iommufd_hw_pagetable *hwpt;
	int rc;

	/*
	 * There is no differentiation when domains are allocated, so any domain
	 * that is willing to attach to the device is interchangeable with any
	 * other.
	 */
	mutex_lock(&ioas->mutex);
	list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) {
		if (!hwpt->auto_domain)
			continue;

		if (!iommufd_lock_obj(&hwpt->obj))
			continue;
		rc = iommufd_device_do_attach(idev, hwpt);
		iommufd_put_object(&hwpt->obj);

		/*
		 * -EINVAL means the domain is incompatible with the device.
		 * Other error codes should propagate to userspace as failure.
		 * Success means the domain is attached.
		 */
		if (rc == -EINVAL)
			continue;
		goto out_unlock;
	}

	hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true);
	if (IS_ERR(hwpt)) {
		rc = PTR_ERR(hwpt);
		goto out_unlock;
	}
	hwpt->auto_domain = true;

	mutex_unlock(&ioas->mutex);
	iommufd_object_finalize(idev->ictx, &hwpt->obj);
	return 0;
out_unlock:
	mutex_unlock(&ioas->mutex);
	return rc;
}

/**
 * iommufd_device_attach - Connect a device to an iommu_domain
 * @idev: device to attach
 * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HW_PAGETABLE
 *         Output the IOMMUFD_OBJ_HW_PAGETABLE ID
 *
 * This connects the device to an iommu_domain, either automatically or manually
 * selected. Once this completes the device could do DMA.
 *
 * The caller should return the resulting pt_id back to userspace.
 * This function is undone by calling iommufd_device_detach().
 */
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
{
	struct iommufd_object *pt_obj;
	int rc;

	pt_obj = iommufd_get_object(idev->ictx, *pt_id, IOMMUFD_OBJ_ANY);
	if (IS_ERR(pt_obj))
		return PTR_ERR(pt_obj);

	switch (pt_obj->type) {
	case IOMMUFD_OBJ_HW_PAGETABLE: {
		struct iommufd_hw_pagetable *hwpt =
			container_of(pt_obj, struct iommufd_hw_pagetable, obj);

		rc = iommufd_device_do_attach(idev, hwpt);
		if (rc)
			goto out_put_pt_obj;
		break;
	}
	case IOMMUFD_OBJ_IOAS: {
		struct iommufd_ioas *ioas =
			container_of(pt_obj, struct iommufd_ioas, obj);

		rc = iommufd_device_auto_get_domain(idev, ioas);
		if (rc)
			goto out_put_pt_obj;
		break;
	}
	default:
		rc = -EINVAL;
		goto out_put_pt_obj;
	}

	refcount_inc(&idev->obj.users);
	*pt_id = idev->hwpt->obj.id;
	rc = 0;

out_put_pt_obj:
	iommufd_put_object(pt_obj);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_attach, IOMMUFD);
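
/*
 * Usage sketch (illustrative only, not part of this file): after a successful
 * bind, the device can be attached to an IOAS or HWPT ID supplied by
 * userspace. pt_id is updated to the HW_PAGETABLE ID actually attached and
 * should be returned to userspace. idev and pt_id are assumed to come from
 * the caller.
 *
 *	rc = iommufd_device_attach(idev, &pt_id);
 *	if (rc)
 *		return rc;
 *	...DMA is now possible, then later...
 *	iommufd_device_detach(idev);
 */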

/**
 * iommufd_device_detach - Disconnect a device from an iommu_domain
 * @idev: device to detach
 *
 * Undo iommufd_device_attach(). This disconnects the idev from the previously
 * attached pt_id. The device returns back to a blocked DMA translation.
 */
void iommufd_device_detach(struct iommufd_device *idev)
{
	struct iommufd_hw_pagetable *hwpt = idev->hwpt;

	mutex_lock(&hwpt->devices_lock);
	list_del(&idev->devices_item);
	idev->hwpt = NULL;
	iommufd_hw_pagetable_detach(hwpt, idev);
	mutex_unlock(&hwpt->devices_lock);

	if (hwpt->auto_domain)
		iommufd_object_deref_user(idev->ictx, &hwpt->obj);
	else
		refcount_dec(&hwpt->obj.users);

	refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);

void iommufd_access_destroy_object(struct iommufd_object *obj)
{
	struct iommufd_access *access =
		container_of(obj, struct iommufd_access, obj);

	if (access->ioas) {
		iopt_remove_access(&access->ioas->iopt, access);
		refcount_dec(&access->ioas->obj.users);
		access->ioas = NULL;
	}
	iommufd_ctx_put(access->ictx);
}

/**
 * iommufd_access_create - Create an iommufd_access
 * @ictx: iommufd file descriptor
 * @ops: Driver's ops to associate with the access
 * @data: Opaque data to pass into ops functions
 * @id: Output ID number to return to userspace for this access
 *
 * An iommufd_access allows a driver to read/write to the IOAS without using
 * DMA. The underlying CPU memory can be accessed using the
 * iommufd_access_pin_pages() or iommufd_access_rw() functions.
 *
 * The provided ops are required to use iommufd_access_pin_pages().
 */
struct iommufd_access *
iommufd_access_create(struct iommufd_ctx *ictx,
		      const struct iommufd_access_ops *ops, void *data, u32 *id)
{
	struct iommufd_access *access;

	/*
	 * There is no uAPI for the access object, but to keep things symmetric
	 * use the object infrastructure anyhow.
	 */
	access = iommufd_object_alloc(ictx, access, IOMMUFD_OBJ_ACCESS);
	if (IS_ERR(access))
		return access;

	access->data = data;
	access->ops = ops;

	if (ops->needs_pin_pages)
		access->iova_alignment = PAGE_SIZE;
	else
		access->iova_alignment = 1;

	/* The calling driver is a user until iommufd_access_destroy() */
	refcount_inc(&access->obj.users);
	access->ictx = ictx;
	iommufd_ctx_get(ictx);
	iommufd_object_finalize(ictx, &access->obj);
	*id = access->obj.id;
	return access;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD);
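
/*
 * Usage sketch (illustrative only, not part of this file): an emulated
 * driver creates an access with its ops. The ops structure layout here is an
 * assumption based on the needs_pin_pages and unmap members used above;
 * example_access_ops, example_unmap and priv are hypothetical names.
 *
 *	static const struct iommufd_access_ops example_access_ops = {
 *		.needs_pin_pages = 1,
 *		.unmap = example_unmap,
 *	};
 *
 *	access = iommufd_access_create(ictx, &example_access_ops, priv,
 *				       &access_id);
 *	if (IS_ERR(access))
 *		return PTR_ERR(access);
 */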

/**
 * iommufd_access_destroy - Destroy an iommufd_access
 * @access: The access to destroy
 *
 * The caller must stop using the access before destroying it.
 */
void iommufd_access_destroy(struct iommufd_access *access)
{
	iommufd_object_destroy_user(access->ictx, &access->obj);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);

int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
	struct iommufd_ioas *new_ioas;
	int rc = 0;

	if (access->ioas)
		return -EINVAL;

	new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
	if (IS_ERR(new_ioas))
		return PTR_ERR(new_ioas);

	rc = iopt_add_access(&new_ioas->iopt, access);
	if (rc) {
		iommufd_put_object(&new_ioas->obj);
		return rc;
	}
	iommufd_ref_to_users(&new_ioas->obj);

	access->ioas = new_ioas;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
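
/*
 * Usage sketch (illustrative only): once created, the access is connected to
 * an IOAS ID, after which pin/rw calls can be made. The ioas_id value is
 * assumed to be provided by userspace.
 *
 *	rc = iommufd_access_attach(access, ioas_id);
 *	if (rc)
 *		return rc;
 */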

/**
 * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
 * @iopt: iopt to work on
 * @iova: Starting iova in the iopt
 * @length: Number of bytes
 *
 * After this function returns there should be no users attached to the pages
 * linked to this iopt that intersect with iova,length. Anyone that has attached
 * a user through iopt_access_pages() needs to detach it through
 * iommufd_access_unpin_pages() before this function returns.
 *
 * iommufd_access_destroy() will wait for any outstanding unmap callback to
 * complete. Once iommufd_access_destroy() returns no unmap ops are running or
 * will run in the future. Due to this a driver must not create locking that
 * prevents unmap from completing while iommufd_access_destroy() is running.
 */
void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length)
{
	struct iommufd_ioas *ioas =
		container_of(iopt, struct iommufd_ioas, iopt);
	struct iommufd_access *access;
	unsigned long index;

	xa_lock(&ioas->iopt.access_list);
	xa_for_each(&ioas->iopt.access_list, index, access) {
		if (!iommufd_lock_obj(&access->obj))
			continue;
		xa_unlock(&ioas->iopt.access_list);

		access->ops->unmap(access->data, iova, length);

		iommufd_put_object(&access->obj);
		xa_lock(&ioas->iopt.access_list);
	}
	xa_unlock(&ioas->iopt.access_list);
}

/**
 * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 *
 * Return the struct pages pinned by iommufd_access_pin_pages(). The caller
 * must stop accessing them before calling this. The iova/length must exactly
 * match the values provided to iommufd_access_pin_pages().
 */
void iommufd_access_unpin_pages(struct iommufd_access *access,
				unsigned long iova, unsigned long length)
{
	struct io_pagetable *iopt = &access->ioas->iopt;
	struct iopt_area_contig_iter iter;
	unsigned long last_iova;
	struct iopt_area *area;

	if (WARN_ON(!length) ||
	    WARN_ON(check_add_overflow(iova, length - 1, &last_iova)))
		return;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
		iopt_area_remove_access(
			area, iopt_area_iova_to_index(area, iter.cur_iova),
			iopt_area_iova_to_index(
				area,
				min(last_iova, iopt_area_last_iova(area))));
	WARN_ON(!iopt_area_contig_done(&iter));
	up_read(&iopt->iova_rwsem);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_unpin_pages, IOMMUFD);

static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
{
	if (iopt_area_start_byte(iter->area, iter->cur_iova) % PAGE_SIZE)
		return false;

	if (!iopt_area_contig_done(iter) &&
	    (iopt_area_start_byte(iter->area, iopt_area_last_iova(iter->area)) %
	     PAGE_SIZE) != (PAGE_SIZE - 1))
		return false;
	return true;
}

static bool check_area_prot(struct iopt_area *area, unsigned int flags)
{
	if (flags & IOMMUFD_ACCESS_RW_WRITE)
		return area->iommu_prot & IOMMU_WRITE;
	return area->iommu_prot & IOMMU_READ;
}

/**
 * iommufd_access_pin_pages() - Return a list of pages under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 * @out_pages: Output page list
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Reads @length bytes starting at iova and returns the struct page * pointers.
 * These can be kmap'd by the caller for CPU access.
 *
 * The caller must perform iommufd_access_unpin_pages() when done to balance
 * this.
 *
 * This API always requires a page aligned iova. This happens naturally if the
 * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However
 * smaller alignments have corner cases where this API can fail on otherwise
 * aligned iova.
 */
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
			     unsigned long length, struct page **out_pages,
			     unsigned int flags)
{
	struct io_pagetable *iopt = &access->ioas->iopt;
	struct iopt_area_contig_iter iter;
	unsigned long last_iova;
	struct iopt_area *area;
	int rc;

	/* Driver's ops don't support pin_pages */
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
	    WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
		return -EINVAL;

	if (!length)
		return -EINVAL;
	if (check_add_overflow(iova, length - 1, &last_iova))
		return -EOVERFLOW;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
		unsigned long last = min(last_iova, iopt_area_last_iova(area));
		unsigned long last_index = iopt_area_iova_to_index(area, last);
		unsigned long index =
			iopt_area_iova_to_index(area, iter.cur_iova);

		if (area->prevent_access ||
		    !iopt_area_contig_is_aligned(&iter)) {
			rc = -EINVAL;
			goto err_remove;
		}

		if (!check_area_prot(area, flags)) {
			rc = -EPERM;
			goto err_remove;
		}

		rc = iopt_area_add_access(area, index, last_index, out_pages,
					  flags);
		if (rc)
			goto err_remove;
		out_pages += last_index - index + 1;
	}
	if (!iopt_area_contig_done(&iter)) {
		rc = -ENOENT;
		goto err_remove;
	}

	up_read(&iopt->iova_rwsem);
	return 0;

err_remove:
	if (iova < iter.cur_iova) {
		last_iova = iter.cur_iova - 1;
		iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
			iopt_area_remove_access(
				area,
				iopt_area_iova_to_index(area, iter.cur_iova),
				iopt_area_iova_to_index(
					area, min(last_iova,
						  iopt_area_last_iova(area))));
	}
	up_read(&iopt->iova_rwsem);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_pin_pages, IOMMUFD);
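
/*
 * Usage sketch (illustrative only, not part of this file): pin one page of
 * IOVA for CPU access and release it again. The iova here is assumed to be
 * PAGE_SIZE aligned, as required above.
 *
 *	struct page *pages[1];
 *
 *	rc = iommufd_access_pin_pages(access, iova, PAGE_SIZE, pages,
 *				      IOMMUFD_ACCESS_RW_WRITE);
 *	if (rc)
 *		return rc;
 *	...kmap_local_page(pages[0]) and use the memory, then later...
 *	iommufd_access_unpin_pages(access, iova, PAGE_SIZE);
 */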

/**
 * iommufd_access_rw - Read or write data under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @data: Kernel buffer to copy to/from
 * @length: Number of bytes to access
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Copy kernel memory to/from the IOVA range given by @iova/@length. If flags
 * indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
 * by changing it into copy_to/from_user().
 */
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
		      void *data, size_t length, unsigned int flags)
{
	struct io_pagetable *iopt = &access->ioas->iopt;
	struct iopt_area_contig_iter iter;
	struct iopt_area *area;
	unsigned long last_iova;
	int rc;

	if (!length)
		return -EINVAL;
	if (check_add_overflow(iova, length - 1, &last_iova))
		return -EOVERFLOW;

	down_read(&iopt->iova_rwsem);
	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
		unsigned long last = min(last_iova, iopt_area_last_iova(area));
		unsigned long bytes = (last - iter.cur_iova) + 1;

		if (area->prevent_access) {
			rc = -EINVAL;
			goto err_out;
		}

		if (!check_area_prot(area, flags)) {
			rc = -EPERM;
			goto err_out;
		}

		rc = iopt_pages_rw_access(
			area->pages, iopt_area_start_byte(area, iter.cur_iova),
			data, bytes, flags);
		if (rc)
			goto err_out;
		data += bytes;
	}
	if (!iopt_area_contig_done(&iter))
		rc = -ENOENT;
err_out:
	up_read(&iopt->iova_rwsem);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD);
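
/*
 * Usage sketch (illustrative only, not part of this file): copy a small
 * buffer out of the IOAS without pinning. Passing flags without
 * IOMMUFD_ACCESS_RW_WRITE requests a read.
 *
 *	u8 buf[64];
 *
 *	rc = iommufd_access_rw(access, iova, buf, sizeof(buf), 0);
 *	if (rc)
 *		return rc;
 */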