
RDMA/cxgb3: Use for_each_sg_dma_page iterator on umem SGL
author     Shiraz, Saleem <shiraz.saleem@intel.com>
           Mon, 11 Feb 2019 15:25:02 +0000 (09:25 -0600)
committer  Jason Gunthorpe <jgg@mellanox.com>
           Mon, 11 Feb 2019 22:24:55 +0000 (15:24 -0700)
Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra inner loop over
pages within each SGE that the for_each_sg iterator required.

Additionally, purge umem->page_shift usage in the driver as it's only
relevant for ODP MRs. Use the system page size and shift instead.

Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/cxgb3/iwch_provider.c

index 4cc9a6a..80dff68 100644
@@ -516,14 +516,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                      u64 virt, int acc, struct ib_udata *udata)
 {
        __be64 *pages;
-       int shift, n, len;
-       int i, k, entry;
+       int shift, n, i;
        int err = 0;
        struct iwch_dev *rhp;
        struct iwch_pd *php;
        struct iwch_mr *mhp;
        struct iwch_reg_user_mr_resp uresp;
-       struct scatterlist *sg;
+       struct sg_dma_page_iter sg_iter;
        pr_debug("%s ib_pd %p\n", __func__, pd);
 
        php = to_iwch_pd(pd);
@@ -541,7 +540,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                return ERR_PTR(err);
        }
 
-       shift = mhp->umem->page_shift;
+       shift = PAGE_SHIFT;
 
        n = mhp->umem->nmap;
 
@@ -557,19 +556,15 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
        i = n = 0;
 
-       for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
-                       len = sg_dma_len(sg) >> shift;
-                       for (k = 0; k < len; ++k) {
-                               pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-                                                        (k << shift));
-                               if (i == PAGE_SIZE / sizeof *pages) {
-                                       err = iwch_write_pbl(mhp, pages, i, n);
-                                       if (err)
-                                               goto pbl_done;
-                                       n += i;
-                                       i = 0;
-                               }
-                       }
+       for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
+               pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+               if (i == PAGE_SIZE / sizeof *pages) {
+                       err = iwch_write_pbl(mhp, pages, i, n);
+                       if (err)
+                               goto pbl_done;
+                       n += i;
+                       i = 0;
+               }
        }
 
        if (i)
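
For reference, below is a minimal, self-contained sketch (not part of the
upstream patch) of the iteration pattern this commit adopts: walking a
DMA-mapped umem SGL one PAGE_SIZE block at a time with
for_each_sg_dma_page() and fetching each block's DMA address with
sg_page_iter_dma_address(). The consume() callback is hypothetical, standing
in for whatever the driver does with each address (in iwch_reg_user_mr, that
is filling and flushing the page list via iwch_write_pbl()).

/*
 * Illustrative sketch only -- not from the patch.
 * Walk a DMA-mapped umem SGL in PAGE_SIZE steps and hand each
 * block's DMA address to a (hypothetical) consumer callback.
 */
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static void walk_umem_dma_pages(struct ib_umem *umem,
                                void (*consume)(u64 dma_addr))
{
        struct sg_dma_page_iter sg_iter;

        /* Iterate PAGE_SIZE-sized DMA blocks across all mapped SGEs. */
        for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
                /* DMA address of the current PAGE_SIZE block. */
                consume(sg_page_iter_dma_address(&sg_iter));
        }
}

Because the iterator already steps in PAGE_SIZE units, the driver no longer
needs the per-SGE inner loop (len = sg_dma_len(sg) >> shift) that the old
for_each_sg code carried.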