RDMA/rdmavt: Adapt to handle non-uniform sizes on umem SGEs
author		Shiraz, Saleem <shiraz.saleem@intel.com>
		Tue, 12 Feb 2019 16:52:24 +0000 (10:52 -0600)
committer	Jason Gunthorpe <jgg@mellanox.com>
		Wed, 13 Feb 2019 16:00:43 +0000 (09:00 -0700)
rdmavt expects all umem SGEs to have a uniform size, which is currently
PAGE_SIZE.

Adapt to a umem API change that can return non-uniformly sized SGEs
when contiguous PAGE_SIZE regions are combined into a single SGE. Use
the for_each_sg_page variant to unfold the larger SGEs into a list of
PAGE_SIZE elements.
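
The resulting walk no longer depends on how the umem SGEs were folded:
for_each_sg_page() hands back exactly one PAGE_SIZE page per iteration.
A minimal sketch of that pattern follows (walk_umem_pages() is an
illustrative helper, not code from this patch; the actual change is in
the diff below):

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Illustrative only: visit a scatterlist one PAGE_SIZE page at a time. */
static int walk_umem_pages(struct scatterlist *sgl, unsigned int nmap)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(sgl, &sg_iter, nmap, 0) {
		/* One PAGE_SIZE page per iteration, even when the source
		 * SGE spans several contiguous pages.
		 */
		void *vaddr = page_address(sg_page_iter_page(&sg_iter));

		if (!vaddr)
			return -EINVAL;
	}
	return 0;
}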

Additionally, purge umem->page_shift usage in the driver as it is only
relevant for ODP MRs. Use the system page size and shift instead.

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/sw/rdmavt/mr.c

index 8b1c1e8..7287950 100644
@@ -381,8 +381,8 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
        struct rvt_mr *mr;
        struct ib_umem *umem;
-       struct scatterlist *sg;
-       int n, m, entry;
+       struct sg_page_iter sg_iter;
+       int n, m;
        struct ib_mr *ret;
 
        if (length == 0)
@@ -407,23 +407,21 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;
 
-       mr->mr.page_shift = umem->page_shift;
+       mr->mr.page_shift = PAGE_SHIFT;
        m = 0;
        n = 0;
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+       for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
                void *vaddr;
 
-               vaddr = page_address(sg_page(sg));
+               vaddr = page_address(sg_page_iter_page(&sg_iter));
                if (!vaddr) {
                        ret = ERR_PTR(-EINVAL);
                        goto bail_inval;
                }
                mr->mr.map[m]->segs[n].vaddr = vaddr;
-               mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
-               trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
-                                     BIT(umem->page_shift));
-               n++;
-               if (n == RVT_SEGSZ) {
+               mr->mr.map[m]->segs[n].length = PAGE_SIZE;
+               trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
+               if (++n == RVT_SEGSZ) {
                        m++;
                        n = 0;
                }