1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2021 Intel Corporation
3 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
5 * iommufd provides control over the IOMMU HW objects created by IOMMU kernel
6 * drivers. IOMMU HW objects revolve around IO page tables that map incoming DMA
7 * addresses (IOVA) to CPU addresses.
9 #define pr_fmt(fmt) "iommufd: " fmt
11 #include <linux/file.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/miscdevice.h>
16 #include <linux/mutex.h>
17 #include <linux/bug.h>
18 #include <uapi/linux/iommufd.h>
19 #include <linux/iommufd.h>
21 #include "io_pagetable.h"
22 #include "iommufd_private.h"
23 #include "iommufd_test.h"
/*
 * Per-object-type destructor vtable, indexed by enum iommufd_object_type.
 * NOTE(review): the struct's closing brace is elided from this chunk.
 */
25 struct iommufd_object_ops {
26 void (*destroy)(struct iommufd_object *obj);
/* The table itself is defined near the bottom of the file, once all the
 * per-type destroy handlers have been declared. */
28 static const struct iommufd_object_ops iommufd_object_ops[];
/* Forward declaration: used by iommufd_fops_open() to detect opens coming
 * through the /dev/vfio/vfio compatibility node. */
29 static struct miscdevice vfio_misc_dev;
/*
 * Allocate a zeroed iommufd object and reserve an ID for it in
 * ictx->objects without making it visible to lookups yet (the slot holds
 * XA_ZERO_ENTRY until iommufd_object_finalize() publishes the pointer).
 * On error the caller path must use iommufd_object_abort().
 * NOTE(review): several lines (size parameter, NULL check, error unwind,
 * final return) are elided from this chunk — confirm against full source.
 */
31 struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
33 enum iommufd_object_type type)
35 struct iommufd_object *obj;
/* GFP_KERNEL_ACCOUNT: charge this allocation to the opening task's memcg */
38 obj = kzalloc(size, GFP_KERNEL_ACCOUNT);
40 return ERR_PTR(-ENOMEM);
42 init_rwsem(&obj->destroy_rwsem);
/* The object starts with a single users reference owned by the caller */
43 refcount_set(&obj->users, 1);
46 * Reserve an ID in the xarray but do not publish the pointer yet since
47 * the caller hasn't initialized it yet. Once the pointer is published
48 * in the xarray and visible to other threads we can't reliably destroy
49 * it anymore, so the caller must complete all errorable operations
50 * before calling iommufd_object_finalize().
/* xa_limit_31b keeps IDs positive so they never collide with error codes */
52 rc = xa_alloc(&ictx->objects, &obj->id, XA_ZERO_ENTRY,
53 xa_limit_31b, GFP_KERNEL_ACCOUNT);
63 * Allow concurrent access to the object.
65 * Once another thread can see the object pointer it can prevent object
66 * destruction. Expect for special kernel-only objects there is no in-kernel way
67 * to reliably destroy a single object. Thus all APIs that are creating objects
68 * must use iommufd_object_abort() to handle their errors and only call
69 * iommufd_object_finalize() once object creation cannot fail.
71 void iommufd_object_finalize(struct iommufd_ctx *ictx,
72 struct iommufd_object *obj)
/* Publish: overwrite the XA_ZERO_ENTRY reserved by _iommufd_object_alloc() */
76 old = xa_store(&ictx->objects, obj->id, obj, GFP_KERNEL);
77 /* obj->id was returned from xa_alloc() so the xa_store() cannot fail */
81 /* Undo _iommufd_object_alloc() if iommufd_object_finalize() was not called */
82 void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
/* Drop the reserved ID slot. NOTE(review): the lines that sanity-check
 * 'old' and free the object are elided from this chunk. */
86 old = xa_erase(&ictx->objects, obj->id);
92 * Abort an object that has been fully initialized and needs destroy, but has
95 void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
96 struct iommufd_object *obj)
/* Run the per-type destructor first, then release the ID via abort() */
98 iommufd_object_ops[obj->type].destroy(obj);
99 iommufd_object_abort(ictx, obj);
/*
 * Look up an object by ID and take a temporary reference on it.
 * IOMMUFD_OBJ_ANY matches any type; otherwise the type must match exactly.
 * Returns ERR_PTR(-ENOENT) on miss, type mismatch, or lock failure.
 */
102 struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
103 enum iommufd_object_type type)
105 struct iommufd_object *obj;
/* Fault-injection hook so tests can exercise lookup-failure paths */
107 if (iommufd_should_fail())
108 return ERR_PTR(-ENOENT);
/* Hold the xa_lock across load + lock_obj so obj can't be freed between */
110 xa_lock(&ictx->objects);
111 obj = xa_load(&ictx->objects, id);
112 if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
113 !iommufd_lock_obj(obj))
114 obj = ERR_PTR(-ENOENT);
115 xa_unlock(&ictx->objects);
120 * Remove the given object id from the xarray if the only reference to the
121 * object is held by the xarray. The caller must call ops destroy().
123 static struct iommufd_object *iommufd_object_remove(struct iommufd_ctx *ictx,
124 u32 id, bool extra_put)
126 struct iommufd_object *obj;
127 XA_STATE(xas, &ictx->objects, id);
129 xa_lock(&ictx->objects);
130 obj = xas_load(&xas);
/* XA_ZERO_ENTRY means the object was allocated but never finalized */
131 if (xa_is_zero(obj) || !obj) {
132 obj = ERR_PTR(-ENOENT);
137 * If the caller is holding a ref on obj we put it here under the
141 refcount_dec(&obj->users);
/* Only destroy if the xarray now holds the last reference; otherwise the
 * object is in use elsewhere and removal must fail with -EBUSY. */
143 if (!refcount_dec_if_one(&obj->users)) {
144 obj = ERR_PTR(-EBUSY);
148 xas_store(&xas, NULL);
/* Keep the VFIO-compat IOAS pointer from dangling if that IOAS goes away */
149 if (ictx->vfio_ioas == container_of(obj, struct iommufd_ioas, obj))
150 ictx->vfio_ioas = NULL;
153 xa_unlock(&ictx->objects);
155 /* The returned object reference count is zero */
160 * The caller holds a users refcount and wants to destroy the object. Returns
161 * true if the object was destroyed. In all cases the caller no longer has a
164 void __iommufd_object_destroy_user(struct iommufd_ctx *ictx,
165 struct iommufd_object *obj, bool allow_fail)
167 struct iommufd_object *ret;
170 * The purpose of the destroy_rwsem is to ensure deterministic
171 * destruction of objects used by external drivers and destroyed by this
172 * function. Any temporary increment of the refcount must hold the read
173 * side of this, such as during ioctl execution.
/* Write side excludes all transient read-side refcount holders */
175 down_write(&obj->destroy_rwsem);
/* extra_put=true: also drop the users ref the caller was holding */
176 ret = iommufd_object_remove(ictx, obj->id, true);
177 up_write(&obj->destroy_rwsem);
/* allow_fail callers tolerate -EBUSY; NOTE(review): the early-return
 * statement following this check is elided from this chunk */
179 if (allow_fail && IS_ERR(ret))
183 * If there is a bug and we couldn't destroy the object then we did put
184 * back the caller's refcount and will eventually try to free it again
/* Reaching here with an error indicates a refcounting bug */
187 if (WARN_ON(IS_ERR(ret)))
190 iommufd_object_ops[obj->type].destroy(obj);
/*
 * IOMMU_DESTROY ioctl handler: remove the user-supplied object ID and run
 * its destructor. Fails with -EBUSY (via iommufd_object_remove) if other
 * references exist. NOTE(review): the error-return line is elided here.
 */
194 static int iommufd_destroy(struct iommufd_ucmd *ucmd)
196 struct iommu_destroy *cmd = ucmd->cmd;
197 struct iommufd_object *obj;
/* extra_put=false: the ioctl caller holds no extra users reference */
199 obj = iommufd_object_remove(ucmd->ictx, cmd->id, false);
202 iommufd_object_ops[obj->type].destroy(obj);
/*
 * Create a new iommufd context on open(). One ictx per file description;
 * it is torn down in iommufd_fops_release().
 */
207 static int iommufd_fops_open(struct inode *inode, struct file *filp)
209 struct iommufd_ctx *ictx;
211 ictx = kzalloc(sizeof(*ictx), GFP_KERNEL_ACCOUNT);
216 * For compatibility with VFIO when /dev/vfio/vfio is opened we default
217 * to the same rlimit accounting as vfio uses.
/* misc core sets private_data to the miscdevice before open runs, which
 * is how we detect which device node was opened */
219 if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER) &&
220 filp->private_data == &vfio_misc_dev) {
221 ictx->account_mode = IOPT_PAGES_ACCOUNT_MM;
222 pr_info_once("IOMMUFD is providing /dev/vfio/vfio, not VFIO.\n");
/* ALLOC1: object IDs start at 1 so 0 is never a valid ID */
225 xa_init_flags(&ictx->objects, XA_FLAGS_ALLOC1 | XA_FLAGS_ACCOUNT);
227 filp->private_data = ictx;
/*
 * Destroy every object in the context when the fd is closed.
 * NOTE(review): the final kfree(ictx)/return lines are elided from this
 * chunk.
 */
231 static int iommufd_fops_release(struct inode *inode, struct file *filp)
233 struct iommufd_ctx *ictx = filp->private_data;
234 struct iommufd_object *obj;
237 * The objects in the xarray form a graph of "users" counts, and we have
238 * to destroy them in a depth first manner. Leaf objects will reduce the
239 * users count of interior objects when they are destroyed.
241 * Repeatedly destroying all the "1 users" leaf objects will progress
242 * until the entire list is destroyed. If this can't progress then there
243 * is some bug related to object refcounting.
245 while (!xa_empty(&ictx->objects)) {
246 unsigned int destroyed = 0;
249 xa_for_each(&ictx->objects, index, obj) {
/* Skip objects still referenced by others; try again next pass */
250 if (!refcount_dec_if_one(&obj->users))
253 xa_erase(&ictx->objects, index);
254 iommufd_object_ops[obj->type].destroy(obj);
257 /* Bug related to users refcount */
258 if (WARN_ON(!destroyed))
/*
 * IOMMU_OPTION ioctl handler: dispatch on option_id and copy the resulting
 * val64 back to userspace. NOTE(review): break/default/return statements
 * of the switch are elided from this chunk.
 */
265 static int iommufd_option(struct iommufd_ucmd *ucmd)
267 struct iommu_option *cmd = ucmd->cmd;
273 switch (cmd->option_id) {
274 case IOMMU_OPTION_RLIMIT_MODE:
275 rc = iommufd_option_rlimit_mode(cmd, ucmd->ictx);
277 case IOMMU_OPTION_HUGE_PAGES:
278 rc = iommufd_ioas_option(ucmd);
/* Write only val64 back; the rest of the user struct is left untouched */
285 if (copy_to_user(&((struct iommu_option __user *)ucmd->ubuffer)->val64,
286 &cmd->val64, sizeof(cmd->val64)))
/*
 * Union sized to the largest ioctl payload, used as the kernel-side copy
 * buffer in iommufd_fops_ioctl(). NOTE(review): the 'union ucmd_buffer {'
 * opening line and closing brace are elided from this chunk.
 */
292 struct iommu_destroy destroy;
293 struct iommu_ioas_alloc alloc;
294 struct iommu_ioas_allow_iovas allow_iovas;
295 struct iommu_ioas_copy ioas_copy;
296 struct iommu_ioas_iova_ranges iova_ranges;
297 struct iommu_ioas_map map;
298 struct iommu_ioas_unmap unmap;
299 struct iommu_option option;
300 struct iommu_vfio_ioas vfio_ioas;
301 #ifdef CONFIG_IOMMUFD_TEST
302 struct iommu_test_cmd test;
/* Dispatch descriptor for one ioctl: total struct size, minimum size the
 * user must supply (up to the '_last' field), the ioctl number, and the
 * handler. NOTE(review): the struct's closing brace is elided. */
306 struct iommufd_ioctl_op {
308 unsigned int min_size;
309 unsigned int ioctl_num;
310 int (*execute)(struct iommufd_ucmd *ucmd);
/* Builds one table entry; BUILD_BUG_ON_ZERO ensures every payload struct
 * fits inside union ucmd_buffer at compile time. Entries are indexed by
 * ioctl number relative to IOMMUFD_CMD_BASE. */
313 #define IOCTL_OP(_ioctl, _fn, _struct, _last) \
314 [_IOC_NR(_ioctl) - IOMMUFD_CMD_BASE] = { \
315 .size = sizeof(_struct) + \
316 BUILD_BUG_ON_ZERO(sizeof(union ucmd_buffer) < \
318 .min_size = offsetofend(_struct, _last), \
319 .ioctl_num = _ioctl, \
/*
 * Ioctl dispatch table; the '_last' argument of each IOCTL_OP names the
 * final field a minimal (older-ABI) userspace is required to supply.
 */
322 static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
323 IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
324 IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl,
325 struct iommu_ioas_alloc, out_ioas_id),
326 IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas,
327 struct iommu_ioas_allow_iovas, allowed_iovas),
328 IOCTL_OP(IOMMU_IOAS_COPY, iommufd_ioas_copy, struct iommu_ioas_copy,
330 IOCTL_OP(IOMMU_IOAS_IOVA_RANGES, iommufd_ioas_iova_ranges,
331 struct iommu_ioas_iova_ranges, out_iova_alignment),
332 IOCTL_OP(IOMMU_IOAS_MAP, iommufd_ioas_map, struct iommu_ioas_map,
334 IOCTL_OP(IOMMU_IOAS_UNMAP, iommufd_ioas_unmap, struct iommu_ioas_unmap,
336 IOCTL_OP(IOMMU_OPTION, iommufd_option, struct iommu_option,
338 IOCTL_OP(IOMMU_VFIO_IOAS, iommufd_vfio_ioas, struct iommu_vfio_ioas,
340 #ifdef CONFIG_IOMMUFD_TEST
341 IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
/*
 * Main ioctl entry point: validate the command number, copy the payload in
 * (zero-extending beyond what the user supplied), and dispatch. Commands
 * outside our range fall through to the VFIO compatibility layer.
 */
345 static long iommufd_fops_ioctl(struct file *filp, unsigned int cmd,
348 struct iommufd_ctx *ictx = filp->private_data;
349 const struct iommufd_ioctl_op *op;
350 struct iommufd_ucmd ucmd = {};
351 union ucmd_buffer buf;
356 if (nr < IOMMUFD_CMD_BASE ||
357 (nr - IOMMUFD_CMD_BASE) >= ARRAY_SIZE(iommufd_ioctl_ops))
358 return iommufd_vfio_ioctl(ictx, cmd, arg);
361 ucmd.ubuffer = (void __user *)arg;
/* Every iommufd ioctl struct begins with a u32 size field */
362 ret = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
366 op = &iommufd_ioctl_ops[nr - IOMMUFD_CMD_BASE];
/* Rejects direction/size mismatches encoded in the full ioctl number */
367 if (op->ioctl_num != cmd)
369 if (ucmd.user_size < op->min_size)
/* copy_struct_from_user zero-fills the tail and rejects nonzero trailing
 * bytes from a newer userspace */
373 ret = copy_struct_from_user(ucmd.cmd, op->size, ucmd.ubuffer,
377 ret = op->execute(&ucmd);
/* File operations shared by /dev/iommu and the /dev/vfio/vfio compat node */
381 static const struct file_operations iommufd_fops = {
382 .owner = THIS_MODULE,
383 .open = iommufd_fops_open,
384 .release = iommufd_fops_release,
385 .unlocked_ioctl = iommufd_fops_ioctl,
389 * iommufd_ctx_get - Get a context reference
390 * @ictx: Context to get
392 * The caller must already hold a valid reference to ictx.
394 void iommufd_ctx_get(struct iommufd_ctx *ictx)
/* The ictx lifetime is tied to its struct file, so pin the file */
396 get_file(ictx->file);
398 EXPORT_SYMBOL_NS_GPL(iommufd_ctx_get, IOMMUFD);
401 * iommufd_ctx_from_file - Acquires a reference to the iommufd context
402 * @file: File to obtain the reference from
404 * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. The struct file
405 * remains owned by the caller and the caller must still do fput. On success
406 * the caller is responsible to call iommufd_ctx_put().
408 struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
410 struct iommufd_ctx *ictx;
/* f_op identity check proves this really is an iommufd file */
412 if (file->f_op != &iommufd_fops)
413 return ERR_PTR(-EBADFD);
414 ictx = file->private_data;
415 iommufd_ctx_get(ictx);
418 EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_file, IOMMUFD);
421 * iommufd_ctx_from_fd - Acquires a reference to the iommufd context
422 * @fd: File descriptor to obtain the reference from
424 * Returns a pointer to the iommufd_ctx, otherwise ERR_PTR. On success
425 * the caller is responsible to call iommufd_ctx_put().
427 struct iommufd_ctx *iommufd_ctx_from_fd(int fd)
433 return ERR_PTR(-EBADF);
435 if (file->f_op != &iommufd_fops) {
/* NOTE(review): the fput() releasing the fget reference on this error
 * path is elided from this chunk — confirm against full source */
437 return ERR_PTR(-EBADFD);
439 /* fget is the same as iommufd_ctx_get() */
440 return file->private_data;
442 EXPORT_SYMBOL_NS_GPL(iommufd_ctx_from_fd, IOMMUFD);
445 * iommufd_ctx_put - Put back a reference
446 * @ictx: Context to put back
448 void iommufd_ctx_put(struct iommufd_ctx *ictx)
452 EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
/* Definition of the destructor table forward-declared at the top of the
 * file; entries are indexed by enum iommufd_object_type. */
454 static const struct iommufd_object_ops iommufd_object_ops[] = {
455 [IOMMUFD_OBJ_ACCESS] = {
456 .destroy = iommufd_access_destroy_object,
458 [IOMMUFD_OBJ_DEVICE] = {
459 .destroy = iommufd_device_destroy,
461 [IOMMUFD_OBJ_IOAS] = {
462 .destroy = iommufd_ioas_destroy,
464 [IOMMUFD_OBJ_HW_PAGETABLE] = {
465 .destroy = iommufd_hw_pagetable_destroy,
467 #ifdef CONFIG_IOMMUFD_TEST
468 [IOMMUFD_OBJ_SELFTEST] = {
469 .destroy = iommufd_selftest_destroy,
/* Primary character device (/dev/iommu); NOTE(review): the .name/.mode
 * initializer lines are elided from this chunk. */
474 static struct miscdevice iommu_misc_dev = {
475 .minor = MISC_DYNAMIC_MINOR,
477 .fops = &iommufd_fops,
/* VFIO-container compatibility node at /dev/vfio/vfio, sharing the same
 * fops; iommufd_fops_open() distinguishes the two by private_data. */
483 static struct miscdevice vfio_misc_dev = {
486 .fops = &iommufd_fops,
487 .nodename = "vfio/vfio",
/* Module init: register /dev/iommu, and optionally take over
 * /dev/vfio/vfio when VFIO-container compatibility is enabled. */
491 static int __init iommufd_init(void)
495 ret = misc_register(&iommu_misc_dev);
499 if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER)) {
500 ret = misc_register(&vfio_misc_dev);
/* Unwind the first registration if the compat device failed */
507 misc_deregister(&iommu_misc_dev);
/* Module exit: deregister in reverse order of registration */
511 static void __exit iommufd_exit(void)
514 if (IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER))
515 misc_deregister(&vfio_misc_dev);
516 misc_deregister(&iommu_misc_dev);
519 module_init(iommufd_init);
520 module_exit(iommufd_exit);
/* When providing the VFIO container, claim VFIO's minor and devname so
 * module autoloading on /dev/vfio/vfio resolves to this module */
522 #if IS_ENABLED(CONFIG_IOMMUFD_VFIO_CONTAINER)
523 MODULE_ALIAS_MISCDEV(VFIO_MINOR);
524 MODULE_ALIAS("devname:vfio/vfio");
526 MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices");
527 MODULE_LICENSE("GPL");