
blk-mq: move failure injection out of blk_mq_complete_request
author	Christoph Hellwig <hch@lst.de>
	Thu, 11 Jun 2020 06:44:47 +0000 (08:44 +0200)
committer	Jens Axboe <axboe@kernel.dk>
	Wed, 24 Jun 2020 15:15:57 +0000 (09:15 -0600)
Move the call to blk_should_fake_timeout out of blk_mq_complete_request
and into the drivers, skipping call sites that are obvious error
handlers, and remove the now superfluous blk_mq_force_complete_rq helper.
This ensures we don't keep injecting errors into completions that just
terminate the Linux request after the hardware has been reset or the
command has been aborted.
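
For illustration, the driver-side pattern after this change looks roughly
like the sketch below. This is not code from the patch: the mydrv_* names
and the struct layout are hypothetical, only the blk-mq calls are the real
API. Normal completions are guarded by blk_should_fake_timeout(), while
timeout/abort recovery paths complete unconditionally.

	#include <linux/blk-mq.h>

	/* Hypothetical per-request PDU; the layout is illustrative only. */
	struct mydrv_cmd {
		blk_status_t status;	/* consumed later by ->complete_rq */
	};

	/* Normal I/O path: let fail_io_timeout injection drop the completion. */
	static void mydrv_complete_command(struct mydrv_cmd *cmd, blk_status_t status)
	{
		struct request *rq = blk_mq_rq_from_pdu(cmd);

		cmd->status = status;
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}

	/*
	 * Error recovery (e.g. ->timeout): complete unconditionally, so a
	 * request already reclaimed after a reset or abort cannot be stalled
	 * by yet another injected fake timeout.
	 */
	static enum blk_eh_timer_return mydrv_timeout_rq(struct request *rq,
							 bool reserved)
	{
		blk_mq_complete_request(rq);
		return BLK_EH_DONE;
	}

Note that when CONFIG_FAIL_IO_TIMEOUT is disabled, the new inline
blk_should_fake_timeout() compiles to a constant false, so the added check
costs nothing in the fast path.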

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
19 files changed:
block/blk-mq.c
block/blk-timeout.c
block/blk.h
block/bsg-lib.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/block/null_blk_main.c
drivers/block/skd_main.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/md/dm-rq.c
drivers/mmc/core/block.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/s390/block/dasd.c
drivers/s390/block/scm_blk.c
drivers/scsi/scsi_lib.c
include/linux/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index ce772ab..3f4f227 100644
@@ -655,16 +655,13 @@ static void __blk_mq_complete_request_remote(void *data)
 }
 
 /**
- * blk_mq_force_complete_rq() - Force complete the request, bypassing any error
- *                             injection that could drop the completion.
- * @rq: Request to be force completed
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:                the request being processed
  *
- * Drivers should use blk_mq_complete_request() to complete requests in their
- * normal IO path. For timeout error recovery, drivers may call this forced
- * completion routine after they've reclaimed timed out requests to bypass
- * potentially subsequent fake timeouts.
- */
-void blk_mq_force_complete_rq(struct request *rq)
+ * Description:
+ *     Complete a request by scheduling the ->complete_rq operation.
+ **/
+void blk_mq_complete_request(struct request *rq)
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct request_queue *q = rq->q;
@@ -702,7 +699,7 @@ void blk_mq_force_complete_rq(struct request *rq)
        }
        put_cpu();
 }
-EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq);
+EXPORT_SYMBOL(blk_mq_complete_request);
 
 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
        __releases(hctx->srcu)
@@ -725,23 +722,6 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
 }
 
 /**
- * blk_mq_complete_request - end I/O on a request
- * @rq:                the request being processed
- *
- * Description:
- *     Ends all I/O on a request. It does not handle partial completions.
- *     The actual completion happens out-of-order, through a IPI handler.
- **/
-bool blk_mq_complete_request(struct request *rq)
-{
-       if (unlikely(blk_should_fake_timeout(rq->q)))
-               return false;
-       blk_mq_force_complete_rq(rq);
-       return true;
-}
-EXPORT_SYMBOL(blk_mq_complete_request);
-
-/**
  * blk_mq_start_request - Start processing a request
  * @rq: Pointer to request to be started
  *
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 8aa68fa..3a1ac64 100644
@@ -20,13 +20,11 @@ static int __init setup_fail_io_timeout(char *str)
 }
 __setup("fail_io_timeout=", setup_fail_io_timeout);
 
-int blk_should_fake_timeout(struct request_queue *q)
+bool __blk_should_fake_timeout(struct request_queue *q)
 {
-       if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
-               return 0;
-
        return should_fail(&fail_io_timeout, 1);
 }
+EXPORT_SYMBOL_GPL(__blk_should_fake_timeout);
 
 static int __init fail_io_timeout_debugfs(void)
 {
diff --git a/block/blk.h b/block/blk.h
index b5d1f0f..8ba4a5e 100644
@@ -223,18 +223,9 @@ ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
                char *buf);
 ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count);
-
-#ifdef CONFIG_FAIL_IO_TIMEOUT
-int blk_should_fake_timeout(struct request_queue *);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
                                const char *, size_t);
-#else
-static inline int blk_should_fake_timeout(struct request_queue *q)
-{
-       return 0;
-}
-#endif
 
 void __blk_queue_split(struct request_queue *q, struct bio **bio,
                unsigned int *nr_segs);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 6cbb792..fb7b347 100644
@@ -181,9 +181,12 @@ EXPORT_SYMBOL_GPL(bsg_job_get);
 void bsg_job_done(struct bsg_job *job, int result,
                  unsigned int reply_payload_rcv_len)
 {
+       struct request *rq = blk_mq_rq_from_pdu(job);
+
        job->result = result;
        job->reply_payload_rcv_len = reply_payload_rcv_len;
-       blk_mq_complete_request(blk_mq_rq_from_pdu(job));
+       if (likely(!blk_should_fake_timeout(rq->q)))
+               blk_mq_complete_request(rq);
 }
 EXPORT_SYMBOL_GPL(bsg_job_done);
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 475e1a7..4acae24 100644
@@ -509,7 +509,8 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
                return;
        kfree(cmd->bvec);
        cmd->bvec = NULL;
-       blk_mq_complete_request(rq);
+       if (likely(!blk_should_fake_timeout(rq->q)))
+               blk_mq_complete_request(rq);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -2048,7 +2049,8 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
                        cmd->ret = ret;
                else
                        cmd->ret = ret ? -EIO : 0;
-               blk_mq_complete_request(rq);
+               if (likely(!blk_should_fake_timeout(rq->q)))
+                       blk_mq_complete_request(rq);
        }
 }
 
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f6bafa9..153e2cd 100644
@@ -492,7 +492,8 @@ static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
        struct request *req = blk_mq_rq_from_pdu(cmd);
 
        cmd->status = status;
-       blk_mq_complete_request(req);
+       if (likely(!blk_should_fake_timeout(req->q)))
+               blk_mq_complete_request(req);
 }
 
 /*
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 43cff01..01794cd 100644
@@ -784,6 +784,7 @@ static void recv_work(struct work_struct *work)
        struct nbd_device *nbd = args->nbd;
        struct nbd_config *config = nbd->config;
        struct nbd_cmd *cmd;
+       struct request *rq;
 
        while (1) {
                cmd = nbd_read_stat(nbd, args->index);
@@ -796,7 +797,9 @@ static void recv_work(struct work_struct *work)
                        break;
                }
 
-               blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
+               rq = blk_mq_rq_from_pdu(cmd);
+               if (likely(!blk_should_fake_timeout(rq->q)))
+                       blk_mq_complete_request(rq);
        }
        atomic_dec(&config->recv_threads);
        wake_up(&config->recv_wq);
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 87b31f9..8225924 100644
@@ -1283,7 +1283,8 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (cmd->nq->dev->queue_mode) {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq);
+                       if (likely(!blk_should_fake_timeout(cmd->rq->q)))
+                               blk_mq_complete_request(cmd->rq);
                        break;
                case NULL_Q_BIO:
                        /*
@@ -1423,7 +1424,7 @@ static bool should_requeue_request(struct request *rq)
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
        pr_info("rq %p timed out\n", rq);
-       blk_mq_force_complete_rq(rq);
+       blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
 
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 51569c1..3a476dc 100644
@@ -1417,7 +1417,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
        case SKD_CHECK_STATUS_REPORT_GOOD:
        case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
                skreq->status = BLK_STS_OK;
-               blk_mq_complete_request(req);
+               if (likely(!blk_should_fake_timeout(req->q)))
+                       blk_mq_complete_request(req);
                break;
 
        case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -1440,7 +1441,8 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
        case SKD_CHECK_STATUS_REPORT_ERROR:
        default:
                skreq->status = BLK_STS_IOERR;
-               blk_mq_complete_request(req);
+               if (likely(!blk_should_fake_timeout(req->q)))
+                       blk_mq_complete_request(req);
                break;
        }
 }
@@ -1560,7 +1562,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
                 */
                if (likely(cmp_status == SAM_STAT_GOOD)) {
                        skreq->status = BLK_STS_OK;
-                       blk_mq_complete_request(rq);
+                       if (likely(!blk_should_fake_timeout(rq->q)))
+                               blk_mq_complete_request(rq);
                } else {
                        skd_resolve_req_exception(skdev, skreq, rq);
                }
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9d21bf0..741804b 100644
@@ -171,7 +171,8 @@ static void virtblk_done(struct virtqueue *vq)
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);
 
-                       blk_mq_complete_request(req);
+                       if (likely(!blk_should_fake_timeout(req->q)))
+                               blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3b889ea..3bb3dd8 100644
@@ -1655,7 +1655,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        BUG();
                }
 
-               blk_mq_complete_request(req);
+               if (likely(!blk_should_fake_timeout(req->q)))
+                       blk_mq_complete_request(req);
        }
 
        rinfo->ring.rsp_cons = i;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index f60c025..5aec1cd 100644
@@ -288,7 +288,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
        struct dm_rq_target_io *tio = tio_from_request(rq);
 
        tio->error = error;
-       blk_mq_complete_request(rq);
+       if (likely(!blk_should_fake_timeout(rq->q)))
+               blk_mq_complete_request(rq);
 }
 
 /*
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 7896952..4791c82 100644
@@ -1446,7 +1446,7 @@ static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
         */
        if (mq->in_recovery)
                mmc_blk_cqe_complete_rq(mq, req);
-       else
+       else if (likely(!blk_should_fake_timeout(req->q)))
                blk_mq_complete_request(req);
 }
 
@@ -1926,7 +1926,7 @@ static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
         */
        if (mq->in_recovery)
                mmc_blk_cqe_complete_rq(mq, req);
-       else
+       else if (likely(!blk_should_fake_timeout(req->q)))
                blk_mq_complete_request(req);
 }
 
@@ -1936,7 +1936,7 @@ void mmc_blk_mq_complete(struct request *req)
 
        if (mq->use_cqe)
                mmc_blk_cqe_complete_rq(mq, req);
-       else
+       else if (likely(!blk_should_fake_timeout(req->q)))
                mmc_blk_mq_complete_rq(mq, req);
 }
 
@@ -1988,7 +1988,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
         */
        if (mq->in_recovery)
                mmc_blk_mq_complete_rq(mq, req);
-       else
+       else if (likely(!blk_should_fake_timeout(req->q)))
                blk_mq_complete_request(req);
 
        mmc_blk_mq_dec_in_flight(mq, req);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c2c5bc4..6810c88 100644
@@ -304,7 +304,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
                return true;
 
        nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
-       blk_mq_force_complete_rq(req);
+       blk_mq_complete_request(req);
        return true;
 }
 EXPORT_SYMBOL_GPL(nvme_cancel_request);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c0f4226..0346132 100644
@@ -481,7 +481,8 @@ static inline void nvme_end_request(struct request *req, __le16 status,
        rq->result = result;
        /* inject error when permitted by fault injection framework */
        nvme_should_fail(req);
-       blk_mq_complete_request(req);
+       if (likely(!blk_should_fake_timeout(req->q)))
+               blk_mq_complete_request(req);
 }
 
 static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index cf87eb2..eb17fea 100644
@@ -2802,7 +2802,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
                        blk_update_request(req, BLK_STS_OK,
                                           blk_rq_bytes(req) - proc_bytes);
                        blk_mq_requeue_request(req, true);
-               } else {
+               } else if (likely(!blk_should_fake_timeout(req->q))) {
                        blk_mq_complete_request(req);
                }
        }
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e018893..a4f6f2e 100644
@@ -256,7 +256,8 @@ static void scm_request_finish(struct scm_request *scmrq)
        for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
                error = blk_mq_rq_to_pdu(scmrq->request[i]);
                *error = scmrq->error;
-               blk_mq_complete_request(scmrq->request[i]);
+               if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
+                       blk_mq_complete_request(scmrq->request[i]);
        }
 
        atomic_dec(&bdev->queued_reqs);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0ba7a65..6ca91d0 100644
@@ -1589,18 +1589,12 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
 
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
+       if (unlikely(blk_should_fake_timeout(cmd->request->q)))
+               return;
        if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
                return;
        trace_scsi_dispatch_cmd_done(cmd);
-
-       /*
-        * If the block layer didn't complete the request due to a timeout
-        * injection, scsi must clear its internal completed state so that the
-        * timeout handler will see it needs to escalate its own error
-        * recovery.
-        */
-       if (unlikely(!blk_mq_complete_request(cmd->request)))
-               clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
+       blk_mq_complete_request(cmd->request);
 }
 
 static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d6fcae1..8e6ab76 100644
@@ -503,8 +503,7 @@ void __blk_mq_end_request(struct request *rq, blk_status_t error);
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-bool blk_mq_complete_request(struct request *rq);
-void blk_mq_force_complete_rq(struct request *rq);
+void blk_mq_complete_request(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
                           struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
@@ -537,6 +536,15 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
 unsigned int blk_mq_rq_cpu(struct request *rq);
 
+bool __blk_should_fake_timeout(struct request_queue *q);
+static inline bool blk_should_fake_timeout(struct request_queue *q)
+{
+       if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
+           test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+               return __blk_should_fake_timeout(q);
+       return false;
+}
+
 /**
  * blk_mq_rq_from_pdu - cast a PDU to a request
  * @pdu: the PDU (Protocol Data Unit) to be casted