OSDN Git Service

nvme: pass nr_maps explicitly to nvme_alloc_io_tag_set
Author: Christoph Hellwig <hch@lst.de>
Wed, 30 Nov 2022 16:16:52 +0000 (17:16 +0100)
Committer: Christoph Hellwig <hch@lst.de>
Wed, 7 Dec 2022 14:02:15 +0000 (15:02 +0100)
Don't look at ctrl->ops as only RDMA and TCP actually support multiple
maps.

Fixes: 6dfba1c09c10 ("nvme-fc: use the tagset alloc/free helpers")
Fixes: ceee1953f923 ("nvme-loop: use the tagset alloc/free helpers")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

index 30717f7..3b36990 100644 (file)
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
 
 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int cmd_size)
+               unsigned int nr_maps, unsigned int cmd_size)
 {
        int ret;
 
@@ -4905,8 +4905,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
        set->driver_data = ctrl;
        set->nr_hw_queues = ctrl->queue_count - 1;
        set->timeout = NVME_IO_TIMEOUT;
-       if (ops->map_queues)
-               set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+       set->nr_maps = nr_maps;
        ret = blk_mq_alloc_tag_set(set);
        if (ret)
                return ret;
index bb89c7f..1a4e009 100644 (file)
@@ -2916,7 +2916,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        nvme_fc_init_io_queues(ctrl);
 
        ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-                       &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
                        struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
                                    ctrl->lport->ops->fcprqst_priv_sz));
        if (ret)
index 2cad9f6..6c45654 100644 (file)
@@ -747,7 +747,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
 int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
                const struct blk_mq_ops *ops, unsigned int flags,
-               unsigned int cmd_size);
+               unsigned int nr_maps, unsigned int cmd_size);
 void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
index cc61a1b..cf8f500 100644 (file)
@@ -798,7 +798,9 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
                            NVME_RDMA_METADATA_SGL_SIZE;
 
        return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
-                       &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
+                       &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
+                       cmd_size);
 }
 
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
index 95e54e9..fa245a5 100644 (file)
@@ -1859,6 +1859,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
                ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
                                &nvme_tcp_mq_ops,
                                BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+                               ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
                                sizeof(struct nvme_tcp_request));
                if (ret)
                        goto out_free_io_queues;
index 0015aed..da32727 100644 (file)
@@ -494,7 +494,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
                return ret;
 
        ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
-                       &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+                       &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE, 1,
                        sizeof(struct nvme_loop_iod) +
                        NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
        if (ret)