OSDN Git Service

blk-mq: move more logic into blk_mq_insert_requests
author: Christoph Hellwig <hch@lst.de>
Thu, 13 Apr 2023 06:40:41 +0000 (08:40 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Thu, 13 Apr 2023 12:52:29 +0000 (06:52 -0600)
Move all logic related to the direct insert (including the call to
blk_mq_run_hw_queue) into blk_mq_insert_requests to streamline the code
flow up a bit, and to allow marking blk_mq_try_issue_list_directly
static.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c
block/blk-mq.c
block/blk-mq.h

index c4b2d44..811a976 100644 (file)
@@ -472,23 +472,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
        e = hctx->queue->elevator;
        if (e) {
                e->type->ops.insert_requests(hctx, list, false);
+               blk_mq_run_hw_queue(hctx, run_queue_async);
        } else {
-               /*
-                * try to issue requests directly if the hw queue isn't
-                * busy in case of 'none' scheduler, and this way may save
-                * us one extra enqueue & dequeue to sw queue.
-                */
-               if (!hctx->dispatch_busy && !run_queue_async) {
-                       blk_mq_run_dispatch_ops(hctx->queue,
-                               blk_mq_try_issue_list_directly(hctx, list));
-                       if (list_empty(list))
-                               goto out;
-               }
-               blk_mq_insert_requests(hctx, ctx, list);
+               blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
        }
-
-       blk_mq_run_hw_queue(hctx, run_queue_async);
- out:
        percpu_ref_put(&q->q_usage_counter);
 }
 
index 29014a0..536f001 100644 (file)
@@ -44,6 +44,9 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+               struct list_head *list);
+
 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
                blk_qc_t qc)
 {
@@ -2495,13 +2498,24 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                           struct list_head *list)
+                           struct list_head *list, bool run_queue_async)
 
 {
        struct request *rq;
        enum hctx_type type = hctx->type;
 
        /*
+        * Try to issue requests directly if the hw queue isn't busy to save an
+        * extra enqueue & dequeue to the sw queue.
+        */
+       if (!hctx->dispatch_busy && !run_queue_async) {
+               blk_mq_run_dispatch_ops(hctx->queue,
+                       blk_mq_try_issue_list_directly(hctx, list));
+               if (list_empty(list))
+                       goto out;
+       }
+
+       /*
         * preemption doesn't flush plug list, so it's possible ctx->cpu is
         * offline now
         */
@@ -2514,6 +2528,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
        list_splice_tail_init(list, &ctx->rq_lists[type]);
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
+out:
+       blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2755,7 +2771,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        } while (!rq_list_empty(plug->mq_list));
 }
 
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                struct list_head *list)
 {
        int queued = 0;
index fa13b69..5d551f9 100644 (file)
@@ -70,9 +70,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                               struct list_head *list);
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
-                                   struct list_head *list);
+                               struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings