
RDMA/hns: Refactor alloc_cqc()
Author:     Wenpeng Liang <liangwenpeng@huawei.com>
AuthorDate: Wed, 2 Mar 2022 06:48:30 +0000 (14:48 +0800)
Commit:     Jason Gunthorpe <jgg@nvidia.com>
CommitDate: Fri, 4 Mar 2022 21:36:32 +0000 (17:36 -0400)
Split alloc_cqc() into several parts and move the steps that are
unrelated to allocating the CQC out of it.
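
After this change, the mailbox handling and the CQC write live in the new
helper hns_roce_create_cqc(), and the software-only CQ initialization moves
to hns_roce_create_cq(). As a rough sketch, the resulting alloc_cqc() reads
as below; this is assembled from the diff that follows, with the error
prints elided and the buffer-address lookup before the first hunk (inferred
from the surrounding context lines) reduced to a comment:

static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	u64 mtts[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle;
	int ret;

	/* ... look up the CQ buffer addresses (mtts/dma_handle), elided ... */

	/* Get the CQC table entry; on failure there is nothing to unwind. */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret)
		return ret;

	/* Publish the CQ in the xarray so events can find it by CQN. */
	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret)
		goto err_put;

	/* Mailbox alloc, write_cqc() and the CREATE_CQC command are now
	 * hidden behind the helper.
	 */
	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);
err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

	return ret;
}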

Link: https://lore.kernel.org/r/20220302064830.61706-10-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/hns/hns_roce_cq.c

index 3d10300..8acd599 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -100,12 +100,39 @@ static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
        mutex_unlock(&cq_table->bank_mutex);
 }
 
+static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
+                              struct hns_roce_cq *hr_cq,
+                              u64 *mtts, dma_addr_t dma_handle)
+{
+       struct ib_device *ibdev = &hr_dev->ib_dev;
+       struct hns_roce_cmd_mailbox *mailbox;
+       int ret;
+
+       mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+       if (IS_ERR(mailbox)) {
+               ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
+               return PTR_ERR(mailbox);
+       }
+
+       hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
+
+       ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
+                                    hr_cq->cqn);
+       if (ret)
+               ibdev_err(ibdev,
+                         "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
+                         hr_cq->cqn, ret);
+
+       hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+       return ret;
+}
+
 static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct ib_device *ibdev = &hr_dev->ib_dev;
-       struct hns_roce_cmd_mailbox *mailbox;
-       u64 mtts[MTT_MIN_COUNT] = { 0 };
+       u64 mtts[MTT_MIN_COUNT] = {};
        dma_addr_t dma_handle;
        int ret;
 
@@ -121,7 +148,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
        if (ret) {
                ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
                          hr_cq->cqn, ret);
-               goto err_out;
+               return ret;
        }
 
        ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
@@ -130,40 +157,17 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
                goto err_put;
        }
 
-       /* Allocate mailbox memory */
-       mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
-       if (IS_ERR(mailbox)) {
-               ret = PTR_ERR(mailbox);
-               goto err_xa;
-       }
-
-       hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);
-
-       ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
-                                    hr_cq->cqn);
-       hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-       if (ret) {
-               ibdev_err(ibdev,
-                         "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
-                         hr_cq->cqn, ret);
+       ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
+       if (ret)
                goto err_xa;
-       }
-
-       hr_cq->cons_index = 0;
-       hr_cq->arm_sn = 1;
-
-       refcount_set(&hr_cq->refcount, 1);
-       init_completion(&hr_cq->free);
 
        return 0;
 
 err_xa:
        xa_erase(&cq_table->array, hr_cq->cqn);
-
 err_put:
        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 
-err_out:
        return ret;
 }
 
@@ -411,6 +415,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
                        goto err_cqc;
        }
 
+       hr_cq->cons_index = 0;
+       hr_cq->arm_sn = 1;
+       refcount_set(&hr_cq->refcount, 1);
+       init_completion(&hr_cq->free);
+
        return 0;
 
 err_cqc:
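
With this split, the consumer-visible state of the CQ (cons_index, arm_sn,
the refcount and the free completion) is only initialized once the CQC has
actually been created in hardware, so the error paths in alloc_cqc() no
longer have any software-side state to unwind.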