
RDMA/bnxt_re: Fix the page_size used during the MR creation
author Selvin Xavier <selvin.xavier@broadcom.com>
Sun, 7 May 2023 18:29:29 +0000 (11:29 -0700)
committer Jason Gunthorpe <jgg@nvidia.com>
Fri, 12 May 2023 21:30:30 +0000 (18:30 -0300)
The driver populates the list of pages used for a memory region
incorrectly when the page size is larger than the system page size. This
causes a failure for applications that create an MR with a 2M page size.
Since the HW can support multiple page sizes, pass the correct page size
while creating the MR.
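
As a minimal sketch (not the driver's exact code), the HW page size for
the user buffer can be chosen with ib_umem_find_best_pgsz() and handed to
the queue allocation instead of the system PAGE_SIZE. The supported-size
bitmap and the helper name below are placeholders, not the driver's real
symbols:

    #include <linux/sizes.h>
    #include <rdma/ib_umem.h>

    /*
     * Sketch only: pick the largest page size the device can use for
     * this umem. The size bitmap is a stand-in for the HW-supported
     * page size mask.
     */
    static unsigned long pick_mr_page_size(struct ib_umem *umem, u64 virt_addr)
    {
            unsigned long supported = SZ_4K | SZ_64K | SZ_2M | SZ_1G;

            return ib_umem_find_best_pgsz(umem, supported, virt_addr);
    }

The chosen size then becomes sginfo->pgsize, with sginfo->pgshft set to
its ilog2(), which mirrors what the qplib_sp.c hunk below does with
buf_pg_size.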

Also, the driver need not adjust the number of pages when HW queues are
created with user memory; it should work with the number of DMA blocks
returned by ib_umem_num_dma_blocks(). Fix this calculation as well.
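
For the user-memory HWQ case, a sketch of the simplified count, assuming
the umem and the chosen HW page size are carried in the sg info as in
bnxt_qplib_sg_info:

    #include <rdma/ib_umem.h>

    /*
     * Sketch only: with user memory, one PBL entry is needed per DMA
     * block of the HW page size, so no rescaling against the system
     * PAGE_SIZE is required.
     */
    static u32 user_hwq_npages(struct ib_umem *umem, unsigned long hw_pgsize)
    {
            return ib_umem_num_dma_blocks(umem, hw_pgsize);
    }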

Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
Fixes: f6919d56388c ("RDMA/bnxt_re: Code refactor while populating user MRs")
Link: https://lore.kernel.org/r/1683484169-9539-1-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_sp.c

drivers/infiniband/hw/bnxt_re/qplib_res.c
index 126d4f2..81b0c5e 100644
@@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                        return -EINVAL;
                hwq_attr->sginfo->npages = npages;
        } else {
-               unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
-                       hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
-
+               npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+                                               hwq_attr->sginfo->pgsize);
                hwq->is_user = true;
-               npages = sginfo_num_pages;
-               npages = (npages * PAGE_SIZE) /
-                         BIT_ULL(hwq_attr->sginfo->pgshft);
-               if ((sginfo_num_pages * PAGE_SIZE) %
-                    BIT_ULL(hwq_attr->sginfo->pgshft))
-                       if (!npages)
-                               npages++;
        }
 
        if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 1714a1e..b967a17 100644
@@ -617,16 +617,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
                /* Free the hwq if it already exist, must be a rereg */
                if (mr->hwq.max_elements)
                        bnxt_qplib_free_hwq(res, &mr->hwq);
-               /* Use system PAGE_SIZE */
                hwq_attr.res = res;
                hwq_attr.depth = pages;
-               hwq_attr.stride = buf_pg_size;
+               hwq_attr.stride = sizeof(dma_addr_t);
                hwq_attr.type = HWQ_TYPE_MR;
                hwq_attr.sginfo = &sginfo;
                hwq_attr.sginfo->umem = umem;
                hwq_attr.sginfo->npages = pages;
-               hwq_attr.sginfo->pgsize = PAGE_SIZE;
-               hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+               hwq_attr.sginfo->pgsize = buf_pg_size;
+               hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
                rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
                if (rc) {
                        dev_err(&res->pdev->dev,