
blk-mq: cleanup request allocation
author     Christoph Hellwig <hch@lst.de>
           Wed, 24 Nov 2021 06:28:56 +0000 (07:28 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 29 Nov 2021 13:38:51 +0000 (06:38 -0700)
Refactor the request allocation so that blk_mq_get_cached_request tries
to find a cached request first, and the entirely separate and now
self-contained blk_mq_get_new_requests allocates one or more requests
if that is not possible.

There is a small change in behavior: submit_bio_checks is now called
twice if a cached request is present but can't be used. That is a
small price to pay for unwinding this code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211124062856.1444266-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
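
For readers skimming the diff below: reassembled from the added lines, the
allocation step in blk_mq_submit_bio() after this patch reduces to the
following. This is only a reading aid, not part of the patch; surrounding
code and declarations are omitted.

	/*
	 * Fast path: reuse a request cached on the current plug.
	 * blk_mq_get_cached_request() runs submit_bio_checks() and the
	 * merge attempt itself, and returns NULL when the bio fails its
	 * checks, gets merged, or the cached request cannot be used
	 * (no plug, wrong queue, wrong hctx type, or a flush mismatch).
	 */
	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
	if (!rq) {
		/*
		 * Slow path: enter the queue and allocate.  This path
		 * repeats submit_bio_checks(), which is the double call
		 * mentioned above when a cached request was present but
		 * could not be used.
		 */
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}

The removed blk_mq_get_request() avoided that duplicate check with its
"checked" flag, which is exactly the entanglement the commit message refers to.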
block/blk-mq.c

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4bdc3bc..a89a624 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2717,8 +2717,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        };
        struct request *rq;
 
-       if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+       if (unlikely(bio_queue_enter(bio)))
                return NULL;
+       if (unlikely(!submit_bio_checks(bio)))
+               goto queue_exit;
+       if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+               goto queue_exit;
 
        rq_qos_throttle(q, bio);
 
@@ -2729,64 +2733,44 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
        }
 
        rq = __blk_mq_alloc_requests(&data);
-       if (rq)
-               return rq;
+       if (!rq)
+               goto fail;
+       return rq;
 
+fail:
        rq_qos_cleanup(q, bio);
        if (bio->bi_opf & REQ_NOWAIT)
                bio_wouldblock_error(bio);
-
+queue_exit:
+       blk_queue_exit(q);
        return NULL;
 }
 
-static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
-{
-       if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
-               return false;
-
-       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-               return false;
-
-       return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
-                                                struct blk_plug *plug,
-                                                struct bio *bio,
-                                                unsigned int nsegs)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+               struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
 {
        struct request *rq;
-       bool checked = false;
 
-       if (plug) {
-               rq = rq_list_peek(&plug->cached_rq);
-               if (rq && rq->q == q) {
-                       if (unlikely(!submit_bio_checks(bio)))
-                               return NULL;
-                       if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-                               return NULL;
-                       checked = true;
-                       if (!blk_mq_can_use_cached_rq(rq, bio))
-                               goto fallback;
-                       rq->cmd_flags = bio->bi_opf;
-                       plug->cached_rq = rq_list_next(rq);
-                       INIT_LIST_HEAD(&rq->queuelist);
-                       rq_qos_throttle(q, bio);
-                       return rq;
-               }
-       }
+       if (!plug)
+               return NULL;
+       rq = rq_list_peek(&plug->cached_rq);
+       if (!rq || rq->q != q)
+               return NULL;
 
-fallback:
-       if (unlikely(bio_queue_enter(bio)))
+       if (unlikely(!submit_bio_checks(bio)))
                return NULL;
-       if (unlikely(!checked && !submit_bio_checks(bio)))
-               goto out_put;
-       rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
-       if (rq)
-               return rq;
-out_put:
-       blk_queue_exit(q);
-       return NULL;
+       if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+               return NULL;
+       if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+               return NULL;
+       if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+               return NULL;
+
+       rq->cmd_flags = bio->bi_opf;
+       plug->cached_rq = rq_list_next(rq);
+       INIT_LIST_HEAD(&rq->queuelist);
+       rq_qos_throttle(q, bio);
+       return rq;
 }
 
 /**
@@ -2805,9 +2789,9 @@ out_put:
 void blk_mq_submit_bio(struct bio *bio)
 {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+       struct blk_plug *plug = blk_mq_plug(q, bio);
        const int is_sync = op_is_sync(bio->bi_opf);
        struct request *rq;
-       struct blk_plug *plug;
        unsigned int nr_segs = 1;
        blk_status_t ret;
 
@@ -2821,10 +2805,12 @@ void blk_mq_submit_bio(struct bio *bio)
        if (!bio_integrity_prep(bio))
                return;
 
-       plug = blk_mq_plug(q, bio);
-       rq = blk_mq_get_request(q, plug, bio, nr_segs);
-       if (unlikely(!rq))
-               return;
+       rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+       if (!rq) {
+               rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+               if (unlikely(!rq))
+                       return;
+       }
 
        trace_block_getrq(bio);