/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib.h>
#include <rdma/uverbs_std_types.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UVERBS_MAJOR       = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
	IB_UVERBS_NUM_FIXED_MINOR = 32,
	IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES -
				      IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);

static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

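/*
 * Note on minor numbering (a worked example derived from the enum above,
 * illustrative only): devnums 0..31 land in the fixed region, e.g. devnum 3
 * becomes MKDEV(231, 192 + 3) and shows up as /dev/infiniband/uverbs3;
 * devnums 32 and up fall into the dynamically allocated dynamic_uverbs_dev
 * region. See ib_uverbs_add_one() and uverbs_devnode() below.
 */
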
/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
	/*
	 * We do not hold the hw_destroy_rwsem lock for this flow; instead
	 * srcu is used. It does not matter if someone races this with
	 * get_context, we get either NULL or a valid ucontext.
	 */
	struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

	if (!srcu_dereference(ufile->device->ib_dev,
			      &ufile->device->disassociate_srcu))
		return ERR_PTR(-EIO);

	if (!ucontext)
		return ERR_PTR(-EINVAL);

	return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);

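/*
 * A minimal calling sketch (mirroring ib_uverbs_mmap() below): enter the
 * disassociate SRCU read side first and hold it for as long as the returned
 * ucontext is used. "do_something" is a hypothetical helper, not part of
 * this file.
 *
 *	int srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
 *	struct ib_ucontext *ucontext = ib_uverbs_get_ucontext_file(ufile);
 *
 *	if (!IS_ERR(ucontext))
 *		do_something(ucontext);
 *	srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
 */
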
int uverbs_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd = mw->pd;
	int ret;

	ret = mw->device->ops.dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);
	return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);

	uverbs_destroy_api(dev->uapi);
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}

125 static void ib_uverbs_release_async_event_file(struct kref *ref)
127 struct ib_uverbs_async_event_file *file =
128 container_of(ref, struct ib_uverbs_async_event_file, ref);
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			   struct ib_uverbs_completion_event_file *ev_file,
			   struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->ev_queue.lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->ev_queue.lock);

		uverbs_uobject_put(&ev_file->uobj);
	}

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->ev_queue.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
			     struct ib_uqp_object *uobj)
{
	struct ib_uverbs_mcast_entry *mcast, *tmp;

	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
		list_del(&mcast->list);
		kfree(mcast);
	}
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	release_ufile_idr_uobject(file);

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->ops.disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	if (file->async_file)
		kref_put(&file->async_file->ref,
			 ib_uverbs_release_async_event_file);
	put_device(&file->device->dev);
	kfree(file);
}

static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
				    struct ib_uverbs_file *uverbs_file,
				    struct file *filp, char __user *buf,
				    size_t count, loff_t *pos,
				    size_t eventsz)
{
	struct ib_uverbs_event *event;
	int ret = 0;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_queue->poll_wait,
					     (!list_empty(&ev_queue->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					     !uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists set an error */
		if (list_empty(&ev_queue->event_list) &&
		    !uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&ev_queue->lock);
	}

	event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;
	} else {
		list_del(ev_queue->event_list.next);
		if (event->counter) {
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&ev_queue->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	kfree(event);

	return ret;
}

static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
					  size_t count, loff_t *pos)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;

	return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_read(&comp_ev_file->ev_queue,
				    comp_ev_file->uobj.ufile, filp,
				    buf, count, pos,
				    sizeof(struct ib_uverbs_comp_event_desc));
}

static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
				     struct file *filp,
				     struct poll_table_struct *wait)
{
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
					   struct poll_table_struct *wait)
{
	return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_queue *ev_queue = filp->private_data;

	return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_completion_event_file *comp_ev_file =
		filp->private_data;

	return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_async_event_file *file = filp->private_data;
	struct ib_uverbs_file *uverbs_file = file->uverbs_file;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->ev_queue.lock);
	closed_already = file->ev_queue.is_closed;
	file->ev_queue.is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->ev_queue.lock);
	if (!closed_already) {
		list_del(&file->list);
		ib_unregister_event_handler(&uverbs_file->event_handler);
	}
	mutex_unlock(&uverbs_file->device->lists_mutex);

	kref_put(&uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_async_event_file);

	return 0;
}

static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uobject *uobj = filp->private_data;
	struct ib_uverbs_completion_event_file *file = container_of(
		uobj, struct ib_uverbs_completion_event_file, uobj);
	struct ib_uverbs_event *entry, *tmp;

	spin_lock_irq(&file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	file->ev_queue.is_closed = 1;
	spin_unlock_irq(&file->ev_queue.lock);

	uverbs_close_fd(filp);

	return 0;
}

const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_comp_event_read,
	.poll    = ib_uverbs_comp_event_poll,
	.release = ib_uverbs_comp_event_close,
	.fasync  = ib_uverbs_comp_event_fasync,
	.llseek	 = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_async_event_read,
	.poll    = ib_uverbs_async_event_poll,
	.release = ib_uverbs_async_event_close,
	.fasync  = ib_uverbs_async_event_fasync,
	.llseek	 = no_llseek,
};

void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_queue *ev_queue = cq_context;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event *entry;
	unsigned long flags;

	if (!ev_queue)
		return;

	spin_lock_irqsave(&ev_queue->lock, flags);
	if (ev_queue->is_closed) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&ev_queue->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &ev_queue->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
	kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}

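/*
 * For illustration only: userspace drains the queue filled above by
 * read()ing the completion channel fd, one struct
 * ib_uverbs_comp_event_desc (a single __u64 cq_handle, see
 * include/uapi/rdma/ib_user_verbs.h) per event. "chan_fd" and "handle_cq"
 * are hypothetical names.
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *
 *	if (read(chan_fd, &desc, sizeof(desc)) == sizeof(desc))
 *		handle_cq(desc.cq_handle);
 */
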
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
	if (file->async_file->ev_queue.is_closed) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

	wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
	kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target QPs, check that the qp is live */
	if (!event->element.qp->uobject)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						     struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
	file->async_file = NULL;
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	ev_queue->is_closed = 0;
	ev_queue->async_queue = NULL;
}

struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
					      struct ib_device *ib_dev)
{
	struct ib_uverbs_async_event_file *ev_file;
	struct file *filp;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	ib_uverbs_init_event_queue(&ev_file->ev_queue);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	kref_init(&ev_file->ref);
	filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
				  ev_file, O_RDONLY);
	if (IS_ERR(filp))
		goto err_put_refs;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	WARN_ON(uverbs_file->async_file);
	uverbs_file->async_file = ev_file;
	kref_get(&uverbs_file->async_file->ref);
	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
	ib_register_event_handler(&uverbs_file->event_handler);
	/* At this point the async file is fully set up */
	return filp;

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
	return filp;
}

static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
			  struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
			  const struct uverbs_api_write_method *method_elm)
{
	if (method_elm->is_ex) {
		count -= sizeof(*hdr) + sizeof(*ex_hdr);

		if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
			return -EINVAL;

		if (hdr->in_words * 8 < method_elm->req_size)
			return -ENOSPC;

		if (ex_hdr->cmd_hdr_reserved)
			return -EINVAL;

		if (ex_hdr->response) {
			if (!hdr->out_words && !ex_hdr->provider_out_words)
				return -EINVAL;

			if (hdr->out_words * 8 < method_elm->resp_size)
				return -ENOSPC;

			if (!access_ok(u64_to_user_ptr(ex_hdr->response),
				       (hdr->out_words + ex_hdr->provider_out_words) * 8))
				return -EFAULT;
		} else {
			if (hdr->out_words || ex_hdr->provider_out_words)
				return -EINVAL;
		}

		return 0;
	}

	/* not extended command */
	if (hdr->in_words * 4 != count)
		return -EINVAL;

	if (count < method_elm->req_size + sizeof(hdr)) {
		/*
		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
		 * with a 16 byte write instead of 24. Old kernels didn't
		 * check the size so they allowed this. Now that the size is
		 * checked provide a compatibility work around to not break
		 * those userspaces.
		 */
		if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
		    count == 16) {
			hdr->in_words = 6;
			return 0;
		}
		return -ENOSPC;
	}

	if (hdr->out_words * 4 < method_elm->resp_size)
		return -ENOSPC;

	return 0;
}

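/*
 * For reference, a sketch of the legacy write() ABI that verify_hdr()
 * checks: a plain command is an 8 byte ib_uverbs_cmd_hdr followed by the
 * command payload, with sizes counted in 4 byte words (extended commands
 * use 8 byte words plus an additional ex_hdr). A hypothetical DESTROY_CQ
 * request built by userspace:
 *
 *	struct {
 *		struct ib_uverbs_cmd_hdr hdr;
 *		struct ib_uverbs_destroy_cq cmd;
 *	} req = {
 *		.hdr.command   = IB_USER_VERBS_CMD_DESTROY_CQ,
 *		.hdr.in_words  = sizeof(req) / 4,	// 6 words = 24 bytes
 *		.hdr.out_words = sizeof(struct ib_uverbs_destroy_cq_resp) / 4,
 *	};
 *	write(uverbs_fd, &req, sizeof(req));	// count must equal in_words * 4
 */
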
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	const struct uverbs_api_write_method *method_elm;
	struct uverbs_api *uapi = file->device->uapi;
	struct ib_uverbs_ex_cmd_hdr ex_hdr;
	struct ib_uverbs_cmd_hdr hdr;
	struct uverbs_attr_bundle bundle;
	int srcu_key;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (count < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	method_elm = uapi_get_method(uapi, hdr.command);
	if (IS_ERR(method_elm))
		return PTR_ERR(method_elm);

	if (method_elm->is_ex) {
		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
			return -EINVAL;
		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
			return -EFAULT;
	}

	ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
	if (ret)
		return ret;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

	buf += sizeof(hdr);

	memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
	bundle.ufile = file;
	if (!method_elm->is_ex) {
		size_t in_len = hdr.in_words * 4 - sizeof(hdr);
		size_t out_len = hdr.out_words * 4;
		u64 response = 0;

		if (method_elm->has_udata) {
			bundle.driver_udata.inlen =
				in_len - method_elm->req_size;
			in_len = method_elm->req_size;
			if (bundle.driver_udata.inlen)
				bundle.driver_udata.inbuf = buf + in_len;
			else
				bundle.driver_udata.inbuf = NULL;
		} else {
			memset(&bundle.driver_udata, 0,
			       sizeof(bundle.driver_udata));
		}

		if (method_elm->has_resp) {
			/*
			 * The macros check that if has_resp is set
			 * then the command request structure starts
			 * with a '__aligned u64 response' member.
			 */
			ret = get_user(response, (const u64 __user *)buf);
			if (ret)
				goto out_unlock;

			if (method_elm->has_udata) {
				bundle.driver_udata.outlen =
					out_len - method_elm->resp_size;
				out_len = method_elm->resp_size;
				if (bundle.driver_udata.outlen)
					bundle.driver_udata.outbuf =
						u64_to_user_ptr(response +
								out_len);
				else
					bundle.driver_udata.outbuf = NULL;
			}
		} else {
			bundle.driver_udata.outlen = 0;
			bundle.driver_udata.outbuf = NULL;
		}

		ib_uverbs_init_udata_buf_or_null(
			&bundle.ucore, buf, u64_to_user_ptr(response),
			in_len, out_len);
	} else {
		buf += sizeof(ex_hdr);

		ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
					u64_to_user_ptr(ex_hdr.response),
					hdr.in_words * 8, hdr.out_words * 8);

		ib_uverbs_init_udata_buf_or_null(
			&bundle.driver_udata, buf + bundle.ucore.inlen,
			u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
			ex_hdr.provider_in_words * 8,
			ex_hdr.provider_out_words * 8);
	}

	ret = method_elm->handler(&bundle);
out_unlock:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return (ret) ? : count;
}

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_ucontext *ucontext;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto out;
	}

	ret = ucontext->device->ops.mmap(ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}

/*
 * Each time we map IO memory into user space this keeps track of the mapping.
 * When the device is hot-unplugged we 'zap' the mmaps in user space to point
 * to the zero page and allow the hot unplug to proceed.
 *
 * This is necessary for cases like PCI physical hot unplug as the actual BAR
 * memory may vanish after this and access to it from userspace could MCE.
 *
 * RDMA drivers supporting disassociation must have their user space designed
 * to cope in some way with their IO pages going to the zero page.
 */
struct rdma_umap_priv {
	struct vm_area_struct *vma;
	struct list_head list;
};

static const struct vm_operations_struct rdma_umap_ops;

static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
				struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	vma->vm_private_data = priv;
	vma->vm_ops = &rdma_umap_ops;

	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct.
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *opriv = vma->vm_private_data;
	struct rdma_umap_priv *priv;

	if (!opriv)
		return;

	/* We are racing with disassociation */
	if (!down_read_trylock(&ufile->hw_destroy_rwsem))
		goto out_zap;
	/*
	 * Disassociation already completed, the VMA should already be zapped.
	 */
	if (!ufile->ucontext)
		goto out_unlock;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_unlock;
	rdma_umap_priv_init(priv, vma);

	up_read(&ufile->hw_destroy_rwsem);
	return;

out_unlock:
	up_read(&ufile->hw_destroy_rwsem);
out_zap:
	/*
	 * We can't allow the VMA to be created with the actual IO pages, that
	 * would break our API contract, and it can't be stopped at this
	 * point, so zap it.
	 */
	vma->vm_private_data = NULL;
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;
	struct rdma_umap_priv *priv = vma->vm_private_data;

	if (!priv)
		return;

	/*
	 * The vma holds a reference on the struct file that created it, which
	 * in turn means that the ib_uverbs_file is guaranteed to exist at
	 * this point.
	 */
	mutex_lock(&ufile->umap_lock);
	list_del(&priv->list);
	mutex_unlock(&ufile->umap_lock);

	kfree(priv);
}

static const struct vm_operations_struct rdma_umap_ops = {
	.open = rdma_umap_open,
	.close = rdma_umap_close,
};

static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
						 struct vm_area_struct *vma,
						 unsigned long size)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	if (vma->vm_end - vma->vm_start != size)
		return ERR_PTR(-EINVAL);

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return ERR_PTR(-EINVAL);
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	return priv;
}

/*
 * Map IO memory into a process. This is to be called by drivers as part of
 * their mmap() functions if they wish to send something like PCI-E BAR
 * memory to userspace.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);

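/*
 * A minimal driver-side sketch, for illustration only: a hypothetical
 * driver .mmap handler forwarding one page of BAR space to userspace.
 * "example_mmap" and "bar_pfn" are assumptions, not part of this API.
 *
 *	static int example_mmap(struct ib_ucontext *ucontext,
 *				struct vm_area_struct *vma)
 *	{
 *		unsigned long bar_pfn = ...;	// device specific
 *
 *		return rdma_user_mmap_io(ucontext, vma, bar_pfn, PAGE_SIZE,
 *					 pgprot_noncached(vma->vm_page_prot));
 *	}
 */
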
/*
 * The page case is here for a slightly different reason, the driver expects
 * to be able to free the page it is sharing to user space when it destroys
 * its ucontext, which means we need to zap the user space references.
 *
 * We could handle this differently by providing an API to allocate a shared
 * page and then only freeing the shared page when the last ufile is
 * destroyed.
 */
int rdma_user_mmap_page(struct ib_ucontext *ucontext,
			struct vm_area_struct *vma, struct page *page,
			unsigned long size)
{
	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);

	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
			    vma->vm_page_prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	rdma_umap_priv_init(priv, vma);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_page);

void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
	struct rdma_umap_priv *priv, *next_priv;

	lockdep_assert_held(&ufile->hw_destroy_rwsem);

	while (1) {
		struct mm_struct *mm = NULL;

		/* Get an arbitrary mm pointer that hasn't been cleaned yet */
		mutex_lock(&ufile->umap_lock);
		while (!list_empty(&ufile->umaps)) {
			int ret;

			priv = list_first_entry(&ufile->umaps,
						struct rdma_umap_priv, list);
			mm = priv->vma->vm_mm;
			ret = mmget_not_zero(mm);
			if (!ret) {
				list_del_init(&priv->list);
				mm = NULL;
				continue;
			}
			break;
		}
		mutex_unlock(&ufile->umap_lock);
		if (!mm)
			return;

		/*
		 * The umap_lock is nested under mmap_sem since it is used
		 * within the vma_ops callbacks, so we have to clean the list
		 * one mm at a time to get the lock ordering right. Typically
		 * there will only be one mm, so no big deal.
		 */
		down_write(&mm->mmap_sem);
		mutex_lock(&ufile->umap_lock);
		list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
					  list) {
			struct vm_area_struct *vma = priv->vma;

			if (vma->vm_mm != mm)
				continue;
			list_del_init(&priv->list);

			zap_vma_ptes(vma, vma->vm_start,
				     vma->vm_end - vma->vm_start);
			vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
		}
		mutex_unlock(&ufile->umap_lock);
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately fail with -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	get_device(&dev->dev);
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* If the IB device supports disassociate_ucontext, there is no hard
	 * dependency between the uverbs device and its low-level device.
	 */
	module_dependent = !(ib_dev->ops.disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	kref_init(&file->ref);
	mutex_init(&file->ucontext_lock);

	spin_lock_init(&file->uobjects_lock);
	INIT_LIST_HEAD(&file->uobjects);
	init_rwsem(&file->hw_destroy_rwsem);
	mutex_init(&file->umap_lock);
	INIT_LIST_HEAD(&file->umaps);

	filp->private_data = file;
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	setup_ufile_idr_uobject(file);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	put_device(&dev->dev);
	return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;

	uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

	mutex_lock(&file->device->lists_mutex);
	list_del_init(&file->list);
	mutex_unlock(&file->device->lists_mutex);

	kref_put(&file->ref, ib_uverbs_release_file);

	return 0;
}

static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
	.unlocked_ioctl = ib_uverbs_ioctl,
	.compat_ioctl = ib_uverbs_ioctl,
};

static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -ENODEV;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct ib_uverbs_device *dev =
			container_of(device, struct ib_uverbs_device, dev);
	int ret = -EIO;
	int srcu_key;
	struct ib_device *ib_dev;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
	&dev_attr_abi_version.attr,
	&dev_attr_ibdev.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
				 struct ib_uverbs_device *uverbs_dev)
{
	struct uverbs_api *uapi;

	uapi = uverbs_alloc_api(device);
	if (IS_ERR(uapi))
		return PTR_ERR(uapi);

	uverbs_dev->uapi = uapi;
	return 0;
}

static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	if (!device->ops.alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	device_initialize(&uverbs_dev->dev);
	uverbs_dev->dev.class = uverbs_class;
	uverbs_dev->dev.parent = device->dev.parent;
	uverbs_dev->dev.release = ib_uverbs_release_dev;
	uverbs_dev->groups[0] = &dev_attr_group;
	uverbs_dev->dev.groups = uverbs_dev->groups;
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
			       GFP_KERNEL);
	if (devnum < 0)
		goto err;
	uverbs_dev->devnum = devnum;
	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
	else
		base = IB_UVERBS_BASE_DEV + devnum;

	if (ib_uverbs_create_uapi(device, uverbs_dev))
		goto err_uapi;

	uverbs_dev->dev.devt = base;
	dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

	cdev_init(&uverbs_dev->cdev,
		  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
	uverbs_dev->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
	if (ret)
		goto err_uapi;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);
	return;

err_uapi:
	ida_free(&uverbs_ida, devnum);
err:
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	put_device(&uverbs_dev->dev);
}

static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_async_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	uverbs_disassociate_api_pre(uverbs_dev);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		list_del_init(&file->list);
		kref_get(&file->ref);

		/* We must release the mutex before going ahead and calling
		 * uverbs_cleanup_ufile, as it might end up indirectly calling
		 * uverbs_close, for example due to freeing the resources (e.g.
		 * mmput).
		 */
		mutex_unlock(&uverbs_dev->lists_mutex);

		ib_uverbs_event_handler(&file->event_handler, &event);
		uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
		kref_put(&file->ref, ib_uverbs_release_file);

		mutex_lock(&uverbs_dev->lists_mutex);
	}

	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_async_event_file,
					      list);
		spin_lock_irq(&event_file->ev_queue.lock);
		event_file->ev_queue.is_closed = 1;
		spin_unlock_irq(&event_file->ev_queue.lock);

		list_del(&event_file->list);
		ib_unregister_event_handler(
			&event_file->uverbs_file->event_handler);
		event_file->uverbs_file->event_handler.device = NULL;

		wake_up_interruptible(&event_file->ev_queue.poll_wait);
		kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);

	uverbs_disassociate_api(uverbs_dev->uapi);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
	ida_free(&uverbs_ida, uverbs_dev->devnum);

	if (device->ops.disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see an EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);

	put_device(&uverbs_dev->dev);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
	int ret;

	ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
				     IB_UVERBS_NUM_FIXED_MINOR,
				     "infiniband_verbs");
	if (ret) {
		pr_err("user_verbs: couldn't register device number\n");
		goto out;
	}

	ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
				  IB_UVERBS_NUM_DYNAMIC_MINOR,
				  "infiniband_verbs");
	if (ret) {
		pr_err("couldn't register dynamic device number\n");
		goto out_alloc;
	}

	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
	if (IS_ERR(uverbs_class)) {
		ret = PTR_ERR(uverbs_class);
		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
		goto out_chrdev;
	}

	uverbs_class->devnode = uverbs_devnode;

	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("user_verbs: couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&uverbs_client);
	if (ret) {
		pr_err("user_verbs: couldn't register client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(uverbs_class);

out_chrdev:
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);

out:
	return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV,
				 IB_UVERBS_NUM_FIXED_MINOR);
	unregister_chrdev_region(dynamic_uverbs_dev,
				 IB_UVERBS_NUM_DYNAMIC_MINOR);
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);