From 222ee581b84582dc472d5395b77d7e0cb5268d1c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Fri, 26 Nov 2021 12:58:11 +0100
Subject: [PATCH] block: move the remaining elv.icq handling to the I/O
 scheduler

After the prepare side has been moved to the only I/O scheduler that
cares, do the same for the cleanup and the NULL initialization.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211126115817.2087431-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/bfq-iosched.c | 12 +++++++++++-
 block/blk-ioc.c     |  1 +
 block/blk-mq.c      | 14 +++-----------
 3 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 2d484d3f7f22..8295b0f96cbf 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6569,6 +6569,16 @@ static void bfq_finish_requeue_request(struct request *rq)
 	rq->elv.priv[1] = NULL;
 }
 
+static void bfq_finish_request(struct request *rq)
+{
+	bfq_finish_requeue_request(rq);
+
+	if (rq->elv.icq) {
+		put_io_context(rq->elv.icq->ioc);
+		rq->elv.icq = NULL;
+	}
+}
+
 /*
  * Removes the association between the current task and bfqq, assuming
  * that bic points to the bfq iocontext of the task.
@@ -7388,7 +7398,7 @@ static struct elevator_type iosched_bfq_mq = {
 		.limit_depth		= bfq_limit_depth,
 		.prepare_request	= bfq_prepare_request,
 		.requeue_request	= bfq_finish_requeue_request,
-		.finish_request		= bfq_finish_requeue_request,
+		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,
 		.dispatch_request	= bfq_dispatch_request,
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index f4f84a2072be..3ba15c867dfa 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -167,6 +167,7 @@ void put_io_context(struct io_context *ioc)
 	if (free_ioc)
 		kmem_cache_free(iocontext_cachep, ioc);
 }
+EXPORT_SYMBOL_GPL(put_io_context);
 
 /**
  * put_io_context_active - put active reference on ioc
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 143a8edf6300..3e67662f7801 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -400,7 +400,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	if (rq->rq_flags & RQF_ELV) {
 		struct elevator_queue *e = data->q->elevator;
 
-		rq->elv.icq = NULL;
 		INIT_HLIST_NODE(&rq->hash);
 		RB_CLEAR_NODE(&rq->rb_node);
 
@@ -631,16 +630,9 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		struct elevator_queue *e = q->elevator;
-
-		if (e->type->ops.finish_request)
-			e->type->ops.finish_request(rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
+	if ((rq->rq_flags & RQF_ELVPRIV) &&
+	    q->elevator->type->ops.finish_request)
+		q->elevator->type->ops.finish_request(rq);
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		__blk_mq_dec_active_requests(hctx);
-- 
2.11.0