OSDN Git Service

block: move the remaining elv.icq handling to the I/O scheduler
author: Christoph Hellwig <hch@lst.de>
Fri, 26 Nov 2021 11:58:11 +0000 (12:58 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 29 Nov 2021 13:41:29 +0000 (06:41 -0700)
After the prepare side has been moved to the only I/O scheduler that
cares, do the same for the cleanup and the NULL initialization.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211126115817.2087431-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bfq-iosched.c
block/blk-ioc.c
block/blk-mq.c

index 2d484d3..8295b0f 100644 (file)
@@ -6569,6 +6569,16 @@ static void bfq_finish_requeue_request(struct request *rq)
        rq->elv.priv[1] = NULL;
 }
 
+static void bfq_finish_request(struct request *rq)
+{
+       bfq_finish_requeue_request(rq);
+
+       if (rq->elv.icq) {
+               put_io_context(rq->elv.icq->ioc);
+               rq->elv.icq = NULL;
+       }
+}
+
 /*
  * Removes the association between the current task and bfqq, assuming
  * that bic points to the bfq iocontext of the task.
@@ -7388,7 +7398,7 @@ static struct elevator_type iosched_bfq_mq = {
                .limit_depth            = bfq_limit_depth,
                .prepare_request        = bfq_prepare_request,
                .requeue_request        = bfq_finish_requeue_request,
-               .finish_request         = bfq_finish_requeue_request,
+               .finish_request         = bfq_finish_request,
                .exit_icq               = bfq_exit_icq,
                .insert_requests        = bfq_insert_requests,
                .dispatch_request       = bfq_dispatch_request,
index f4f84a2..3ba15c8 100644 (file)
@@ -167,6 +167,7 @@ void put_io_context(struct io_context *ioc)
        if (free_ioc)
                kmem_cache_free(iocontext_cachep, ioc);
 }
+EXPORT_SYMBOL_GPL(put_io_context);
 
 /**
  * put_io_context_active - put active reference on ioc
index 143a8ed..3e67662 100644 (file)
@@ -400,7 +400,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        if (rq->rq_flags & RQF_ELV) {
                struct elevator_queue *e = data->q->elevator;
 
-               rq->elv.icq = NULL;
                INIT_HLIST_NODE(&rq->hash);
                RB_CLEAR_NODE(&rq->rb_node);
 
@@ -631,16 +630,9 @@ void blk_mq_free_request(struct request *rq)
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-       if (rq->rq_flags & RQF_ELVPRIV) {
-               struct elevator_queue *e = q->elevator;
-
-               if (e->type->ops.finish_request)
-                       e->type->ops.finish_request(rq);
-               if (rq->elv.icq) {
-                       put_io_context(rq->elv.icq->ioc);
-                       rq->elv.icq = NULL;
-               }
-       }
+       if ((rq->rq_flags & RQF_ELVPRIV) &&
+           q->elevator->type->ops.finish_request)
+               q->elevator->type->ops.finish_request(rq);
 
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                __blk_mq_dec_active_requests(hctx);