OSDN Git Service

nvmet: Open code nvmet_req_execute()
Author: Christoph Hellwig <hch@lst.de>
Wed, 23 Oct 2019 16:35:45 +0000 (10:35 -0600)
Committer: Jens Axboe <axboe@kernel.dk>
Mon, 4 Nov 2019 17:56:42 +0000 (10:56 -0700)
Now that nvmet_req_execute does nothing, open code it.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/nvme/target/tcp.c

index 565def1..cde58c0 100644 (file)
@@ -942,12 +942,6 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
-void nvmet_req_execute(struct nvmet_req *req)
-{
-       req->execute(req);
-}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
-
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
        struct pci_dev *p2p_dev = NULL;
index 61b6176..a0db637 100644 (file)
@@ -2018,7 +2018,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
                }
 
                /* data transfer complete, resume with nvmet layer */
-               nvmet_req_execute(&fod->req);
+               fod->req.execute(&fod->req);
                break;
 
        case NVMET_FCOP_READDATA:
@@ -2234,7 +2234,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
         * can invoke the nvmet_layer now. If read data, cmd completion will
         * push the data
         */
-       nvmet_req_execute(&fod->req);
+       fod->req.execute(&fod->req);
        return;
 
 transport_error:
index 5b7b197..856eb06 100644 (file)
@@ -125,7 +125,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);
 
-       nvmet_req_execute(&iod->req);
+       iod->req.execute(&iod->req);
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
index ff55f10..46df45e 100644 (file)
@@ -374,7 +374,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
-void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
index ccf9821..37d262a 100644 (file)
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
                return;
        }
 
-       nvmet_req_execute(&rsp->req);
+       rsp->req.execute(&rsp->req);
 }
 
 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
                                queue->cm_id->port_num, &rsp->read_cqe, NULL))
                        nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
        } else {
-               nvmet_req_execute(&rsp->req);
+               rsp->req.execute(&rsp->req);
        }
 
        return true;
index 3378480..af674fc 100644 (file)
@@ -930,7 +930,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
                goto out;
        }
 
-       nvmet_req_execute(&queue->cmd->req);
+       queue->cmd->req.execute(&queue->cmd->req);
 out:
        nvmet_prepare_receive_pdu(queue);
        return ret;
@@ -1050,7 +1050,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
                        nvmet_tcp_prep_recv_ddgst(cmd);
                        return 0;
                }
-               nvmet_req_execute(&cmd->req);
+               cmd->req.execute(&cmd->req);
        }
 
        nvmet_prepare_receive_pdu(queue);
@@ -1090,7 +1090,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 
        if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
            cmd->rbytes_done == cmd->req.transfer_len)
-               nvmet_req_execute(&cmd->req);
+               cmd->req.execute(&cmd->req);
        ret = 0;
 out:
        nvmet_prepare_receive_pdu(queue);