/*
 * VFIO generic eventfd code for IRQFD support.
 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/vfio.h>
14 #include <linux/eventfd.h>
15 #include <linux/file.h>
16 #include <linux/slab.h>
/* Workqueue used to defer virqfd release out of wait-queue callback context. */
static struct workqueue_struct *vfio_irqfd_cleanup_wq;
/*
 * Protects the caller-owned (*pvirqfd) pointers so a virqfd is queued for
 * release at most once.  static: only this file uses it, so keep it out of
 * the kernel's global namespace.
 */
static DEFINE_SPINLOCK(virqfd_lock);
21 int __init vfio_virqfd_init(void)
23 vfio_irqfd_cleanup_wq =
24 create_singlethread_workqueue("vfio-irqfd-cleanup");
25 if (!vfio_irqfd_cleanup_wq)
31 void vfio_virqfd_exit(void)
33 destroy_workqueue(vfio_irqfd_cleanup_wq);
36 static void virqfd_deactivate(struct virqfd *virqfd)
38 queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
41 static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
43 struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
44 unsigned long flags = (unsigned long)key;
47 /* An event has been signaled, call function */
48 if ((!virqfd->handler ||
49 virqfd->handler(virqfd->opaque, virqfd->data)) &&
51 schedule_work(&virqfd->inject);
54 if (flags & POLLHUP) {
56 spin_lock_irqsave(&virqfd_lock, flags);
59 * The eventfd is closing, if the virqfd has not yet been
60 * queued for release, as determined by testing whether the
61 * virqfd pointer to it is still valid, queue it now. As
62 * with kvm irqfds, we know we won't race against the virqfd
63 * going away because we hold the lock to get here.
65 if (*(virqfd->pvirqfd) == virqfd) {
66 *(virqfd->pvirqfd) = NULL;
67 virqfd_deactivate(virqfd);
70 spin_unlock_irqrestore(&virqfd_lock, flags);
76 static void virqfd_ptable_queue_proc(struct file *file,
77 wait_queue_head_t *wqh, poll_table *pt)
79 struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
80 add_wait_queue(wqh, &virqfd->wait);
83 static void virqfd_shutdown(struct work_struct *work)
85 struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
88 eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
89 flush_work(&virqfd->inject);
90 eventfd_ctx_put(virqfd->eventfd);
95 static void virqfd_inject(struct work_struct *work)
97 struct virqfd *virqfd = container_of(work, struct virqfd, inject);
99 virqfd->thread(virqfd->opaque, virqfd->data);
102 int vfio_virqfd_enable(void *opaque,
103 int (*handler)(void *, void *),
104 void (*thread)(void *, void *),
105 void *data, struct virqfd **pvirqfd, int fd)
108 struct eventfd_ctx *ctx;
109 struct virqfd *virqfd;
113 virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
117 virqfd->pvirqfd = pvirqfd;
118 virqfd->opaque = opaque;
119 virqfd->handler = handler;
120 virqfd->thread = thread;
123 INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
124 INIT_WORK(&virqfd->inject, virqfd_inject);
132 ctx = eventfd_ctx_fileget(irqfd.file);
138 virqfd->eventfd = ctx;
141 * virqfds can be released by closing the eventfd or directly
142 * through ioctl. These are both done through a workqueue, so
143 * we update the pointer to the virqfd under lock to avoid
144 * pushing multiple jobs to release the same virqfd.
146 spin_lock_irq(&virqfd_lock);
149 spin_unlock_irq(&virqfd_lock);
155 spin_unlock_irq(&virqfd_lock);
158 * Install our own custom wake-up handling so we are notified via
159 * a callback whenever someone signals the underlying eventfd.
161 init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
162 init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
164 events = irqfd.file->f_op->poll(irqfd.file, &virqfd->pt);
167 * Check if there was an event already pending on the eventfd
168 * before we registered and trigger it as if we didn't miss it.
170 if (events & POLLIN) {
171 if ((!handler || handler(opaque, data)) && thread)
172 schedule_work(&virqfd->inject);
176 * Do not drop the file until the irqfd is fully initialized,
177 * otherwise we might race against the POLLHUP.
183 eventfd_ctx_put(ctx);
191 EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
193 void vfio_virqfd_disable(struct virqfd **pvirqfd)
197 spin_lock_irqsave(&virqfd_lock, flags);
200 virqfd_deactivate(*pvirqfd);
204 spin_unlock_irqrestore(&virqfd_lock, flags);
207 * Block until we know all outstanding shutdown jobs have completed.
208 * Even if we don't queue the job, flush the wq to be sure it's
211 flush_workqueue(vfio_irqfd_cleanup_wq);
213 EXPORT_SYMBOL_GPL(vfio_virqfd_disable);