// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

struct io_msg {
        struct file                     *file;
        struct file                     *src_file;
        struct callback_head            tw;
        u64                             user_data;
        u32                             len;
        u32                             cmd;
        u32                             src_fd;
        u32                             dst_fd;
        u32                             flags;
};

static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
        mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
                              unsigned int issue_flags)
{
        /*
         * To ensure proper ordering between the two ctxs, we can only
         * attempt a trylock on the target. If that fails and we already have
         * the source ctx lock, punt to io-wq.
         */
        if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                if (!mutex_trylock(&octx->uring_lock))
                        return -EAGAIN;
                return 0;
        }
        mutex_lock(&octx->uring_lock);
        return 0;
}
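
/*
 * Drop the source file reference if an IORING_MSG_SEND_FD request is torn
 * down before the file was installed into the target ring.
 */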
void io_msg_ring_cleanup(struct io_kiocb *req)
{
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

        if (WARN_ON_ONCE(!msg->src_file))
                return;

        fput(msg->src_file);
        msg->src_file = NULL;
}
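
/*
 * Task_work callback for IORING_MSG_DATA: runs in the target ring's
 * submitter task and posts the CQE there.
 */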
static void io_msg_tw_complete(struct callback_head *head)
{
        struct io_msg *msg = container_of(head, struct io_msg, tw);
        struct io_kiocb *req = cmd_to_io_kiocb(msg);
        struct io_ring_ctx *target_ctx = req->file->private_data;
        int ret = 0;

        if (current->flags & PF_EXITING) {
                ret = -EOWNERDEAD;
        } else {
                /*
                 * If the target ring is using IOPOLL mode, then we need to be
                 * holding the uring_lock for posting completions. Other ring
                 * types rely on the regular completion locking, which is
                 * handled while posting.
                 */
                if (target_ctx->flags & IORING_SETUP_IOPOLL)
                        mutex_lock(&target_ctx->uring_lock);
                if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                        ret = -EOVERFLOW;
                if (target_ctx->flags & IORING_SETUP_IOPOLL)
                        mutex_unlock(&target_ctx->uring_lock);
        }

        if (ret < 0)
                req_set_fail(req);
        io_req_queue_tw_complete(req, ret);
}
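
/*
 * Post a CQE with the given user_data/len to the target ring. If the target
 * requires completions to be run from its submitter task, punt to task_work.
 */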
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_ring_ctx *target_ctx = req->file->private_data;
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
        int ret;

        if (msg->src_fd || msg->dst_fd || msg->flags)
                return -EINVAL;

        if (target_ctx->task_complete && current != target_ctx->submitter_task) {
                init_task_work(&msg->tw, io_msg_tw_complete);
                if (task_work_add(target_ctx->submitter_task, &msg->tw,
                                  TWA_SIGNAL_NO_IPI))
                        return -EOWNERDEAD;

                atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
                return IOU_ISSUE_SKIP_COMPLETE;
        }

        ret = -EOVERFLOW;
        if (target_ctx->flags & IORING_SETUP_IOPOLL) {
                if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
                        return -EAGAIN;
                if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                        ret = 0;
                io_double_unlock_ctx(target_ctx);
        } else {
                if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                        ret = 0;
        }
        return ret;
}
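
/*
 * Look up the source fixed file under the submit lock and take a reference
 * on it, so it can later be installed into the target ring.
 */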
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
        struct io_ring_ctx *ctx = req->ctx;
        struct file *file = NULL;
        unsigned long file_ptr;
        int idx = msg->src_fd;

        io_ring_submit_lock(ctx, issue_flags);
        if (likely(idx < ctx->nr_user_files)) {
                idx = array_index_nospec(idx, ctx->nr_user_files);
                file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
                file = (struct file *) (file_ptr & FFS_MASK);
                if (file)
                        get_file(file);
        }
        io_ring_submit_unlock(ctx, issue_flags);
        return file;
}
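
/*
 * Install the grabbed file into the target ring's fixed file table and,
 * unless IORING_MSG_RING_CQE_SKIP is set, post a CQE to the target ring.
 */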
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_ring_ctx *target_ctx = req->file->private_data;
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
        struct file *src_file = msg->src_file;
        int ret;

        if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
                return -EAGAIN;

        ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
        if (ret < 0)
                goto out_unlock;

        msg->src_file = NULL;
        req->flags &= ~REQ_F_NEED_CLEANUP;

        if (msg->flags & IORING_MSG_RING_CQE_SKIP)
                goto out_unlock;
        /*
         * If this fails, the target still received the file descriptor but
         * wasn't notified of the fact. This means that if this request
         * completes with -EOVERFLOW, then the sender must ensure that a
         * later IORING_OP_MSG_RING delivers the message.
         */
        if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                ret = -EOVERFLOW;
out_unlock:
        io_double_unlock_ctx(target_ctx);
        return ret;
}
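
/*
 * Task_work callback for IORING_MSG_SEND_FD when the file install must run
 * from the target ring's submitter task.
 */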
static void io_msg_tw_fd_complete(struct callback_head *head)
{
        struct io_msg *msg = container_of(head, struct io_msg, tw);
        struct io_kiocb *req = cmd_to_io_kiocb(msg);
        int ret = -EOWNERDEAD;

        if (!(current->flags & PF_EXITING))
                ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
        if (ret < 0)
                req_set_fail(req);
        io_req_queue_tw_complete(req, ret);
}
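
/*
 * Send a fixed file from the source ring to the target ring, installing it
 * into the target's fixed file table at dst_fd.
 */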
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_ring_ctx *target_ctx = req->file->private_data;
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
        struct io_ring_ctx *ctx = req->ctx;
        struct file *src_file = msg->src_file;

        if (target_ctx == ctx)
                return -EINVAL;
        if (!src_file) {
                src_file = io_msg_grab_file(req, issue_flags);
                if (!src_file)
                        return -EBADF;
                msg->src_file = src_file;
                req->flags |= REQ_F_NEED_CLEANUP;
        }

        if (target_ctx->task_complete && current != target_ctx->submitter_task) {
                init_task_work(&msg->tw, io_msg_tw_fd_complete);
                if (task_work_add(target_ctx->submitter_task, &msg->tw,
                                  TWA_SIGNAL))
                        return -EOWNERDEAD;

                return IOU_ISSUE_SKIP_COMPLETE;
        }
        return io_msg_install_complete(req, issue_flags);
}
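
/*
 * Read the MSG_RING parameters from the SQE; only IORING_MSG_RING_CQE_SKIP
 * is accepted in msg_ring_flags.
 */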
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

        if (unlikely(sqe->buf_index || sqe->personality))
                return -EINVAL;

        msg->src_file = NULL;
        msg->user_data = READ_ONCE(sqe->off);
        msg->len = READ_ONCE(sqe->len);
        msg->cmd = READ_ONCE(sqe->addr);
        msg->src_fd = READ_ONCE(sqe->addr3);
        msg->dst_fd = READ_ONCE(sqe->file_index);
        msg->flags = READ_ONCE(sqe->msg_ring_flags);
        if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
                return -EINVAL;

        return 0;
}
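
/*
 * Issue handler for IORING_OP_MSG_RING: dispatch on the requested command.
 */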
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
        int ret;

        ret = -EBADFD;
        if (!io_is_uring_fops(req->file))
                goto done;

        switch (msg->cmd) {
        case IORING_MSG_DATA:
                ret = io_msg_ring_data(req, issue_flags);
                break;
        case IORING_MSG_SEND_FD:
                ret = io_msg_send_fd(req, issue_flags);
                break;
        default:
                ret = -EINVAL;
                break;
        }

done:
        if (ret < 0) {
                if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
                        return ret;
                req_set_fail(req);
        }
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}