io_uring: encapsulate rw state
author     Pavel Begunkov <asml.silence@gmail.com>
           Thu, 14 Oct 2021 15:10:15 +0000 (16:10 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 19 Oct 2021 11:49:55 +0000 (05:49 -0600)

Add a new struct io_rw_state storing all iov-related bits: the fast iov,
the iterator, and the iterator state. Not much changes here; simply
convert struct io_async_rw to use it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e8245ffcb568b228a009ec1eb79c993c813679f1.1634144845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
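
For orientation, below is a minimal sketch of the layout this patch ends up
with, together with the iterator save/restore pattern the hunks rely on when a
request is retried. The struct definitions mirror the first hunk; the
io_rw_state_save()/io_rw_state_restore() wrappers are illustrative only and
are not part of the patch.

#include <linux/uio.h>      /* struct iov_iter, iov_iter_save_state(), iov_iter_restore() */
#include <linux/pagemap.h>  /* struct wait_page_queue */

/* All iov-related bits now live together in one sub-struct. */
struct io_rw_state {
        struct iovec                    fast_iov[UIO_FASTIOV];
        struct iov_iter                 iter;
        struct iov_iter_state           iter_state;
};

struct io_async_rw {
        struct io_rw_state              s;
        const struct iovec              *free_iovec;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
};

/* Illustrative wrapper: snapshot the iterator before issuing the I/O. */
static inline void io_rw_state_save(struct io_rw_state *s)
{
        iov_iter_save_state(&s->iter, &s->iter_state);
}

/* Illustrative wrapper: rewind the iterator when the request is retried. */
static inline void io_rw_state_restore(struct io_rw_state *s)
{
        iov_iter_restore(&s->iter, &s->iter_state);
}

Grouping the three fields keeps the iterator and its saved state together,
which is the pattern io_setup_async_rw(), io_rw_prep_async(), and
io_resubmit_prep() follow in the diff below.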

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 18a91d2..4736561 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -694,11 +694,15 @@ struct io_async_msghdr {
        struct sockaddr_storage         addr;
 };
 
-struct io_async_rw {
+struct io_rw_state {
        struct iovec                    fast_iov[UIO_FASTIOV];
-       const struct iovec              *free_iovec;
        struct iov_iter                 iter;
        struct iov_iter_state           iter_state;
+};
+
+struct io_async_rw {
+       struct io_rw_state              s;
+       const struct iovec              *free_iovec;
        size_t                          bytes_done;
        struct wait_page_queue          wpq;
 };
@@ -2596,7 +2600,7 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 
        if (!req_has_async_data(req))
                return !io_req_prep_async(req);
-       iov_iter_restore(&rw->iter, &rw->iter_state);
+       iov_iter_restore(&rw->s.iter, &rw->s.iter_state);
        return true;
 }
 
@@ -3259,7 +3263,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 {
        struct io_async_rw *rw = req->async_data;
 
-       memcpy(&rw->iter, iter, sizeof(*iter));
+       memcpy(&rw->s.iter, iter, sizeof(*iter));
        rw->free_iovec = iovec;
        rw->bytes_done = 0;
        /* can only be fixed buffers, no need to do anything */
@@ -3268,13 +3272,13 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
        if (!iovec) {
                unsigned iov_off = 0;
 
-               rw->iter.iov = rw->fast_iov;
+               rw->s.iter.iov = rw->s.fast_iov;
                if (iter->iov != fast_iov) {
                        iov_off = iter->iov - fast_iov;
-                       rw->iter.iov += iov_off;
+                       rw->s.iter.iov += iov_off;
                }
-               if (rw->fast_iov != fast_iov)
-                       memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
+               if (rw->s.fast_iov != fast_iov)
+                       memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off,
                               sizeof(struct iovec) * iter->nr_segs);
        } else {
                req->flags |= REQ_F_NEED_CLEANUP;
@@ -3309,7 +3313,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                io_req_map_rw(req, iovec, fast_iov, iter);
                iorw = req->async_data;
                /* we've copied and mapped the iter, ensure state is saved */
-               iov_iter_save_state(&iorw->iter, &iorw->iter_state);
+               iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
        }
        return 0;
 }
@@ -3317,10 +3321,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
 {
        struct io_async_rw *iorw = req->async_data;
-       struct iovec *iov = iorw->fast_iov;
+       struct iovec *iov = iorw->s.fast_iov;
        int ret;
 
-       ret = io_import_iovec(rw, req, &iov, &iorw->iter, false);
+       ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false);
        if (unlikely(ret < 0))
                return ret;
 
@@ -3328,7 +3332,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        iorw->free_iovec = iov;
        if (iov)
                req->flags |= REQ_F_NEED_CLEANUP;
-       iov_iter_save_state(&iorw->iter, &iorw->iter_state);
+       iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
        return 0;
 }
 
@@ -3438,8 +3442,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
        if (req_has_async_data(req)) {
                rw = req->async_data;
-               iter = &rw->iter;
-               state = &rw->iter_state;
+               iter = &rw->s.iter;
+               state = &rw->s.iter_state;
                /*
                 * We come here from an earlier attempt, restore our state to
                 * match in case it doesn't. It's cheap enough that we don't
@@ -3510,9 +3514,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
         * Now use our persistent iterator and state, if we aren't already.
         * We've restored and mapped the iter to match.
         */
-       if (iter != &rw->iter) {
-               iter = &rw->iter;
-               state = &rw->iter_state;
+       if (iter != &rw->s.iter) {
+               iter = &rw->s.iter;
+               state = &rw->s.iter_state;
        }
 
        do {
@@ -3574,8 +3578,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 
        if (req_has_async_data(req)) {
                rw = req->async_data;
-               iter = &rw->iter;
-               state = &rw->iter_state;
+               iter = &rw->s.iter;
+               state = &rw->s.iter_state;
                iov_iter_restore(iter, state);
                iovec = NULL;
        } else {