RDMA/siw: Fix user page pinning accounting
author    Bernard Metzler <bmt@zurich.ibm.com>
          Thu, 2 Feb 2023 10:10:00 +0000 (11:10 +0100)
committer Leon Romanovsky <leon@kernel.org>
          Mon, 6 Feb 2023 12:46:50 +0000 (14:46 +0200)
To avoid racing with other user memory reservations, immediately
account for the full number of pages to be pinned.

Fixes: 2251334dcac9 ("rdma/siw: application buffer management")
Reported-by: Jason Gunthorpe <jgg@nvidia.com>
Suggested-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Link: https://lore.kernel.org/r/20230202101000.402990-1-bmt@zurich.ibm.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/sw/siw/siw_mem.c

index b2b33dd..f51ab2c 100644
@@ -398,7 +398,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 
        mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-       if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
+       if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
                rv = -ENOMEM;
                goto out_sem_up;
        }
@@ -411,30 +411,27 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
                goto out_sem_up;
        }
        for (i = 0; num_pages; i++) {
-               int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
-
-               umem->page_chunk[i].plist =
+               int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+               struct page **plist =
                        kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
-               if (!umem->page_chunk[i].plist) {
+
+               if (!plist) {
                        rv = -ENOMEM;
                        goto out_sem_up;
                }
-               got = 0;
+               umem->page_chunk[i].plist = plist;
                while (nents) {
-                       struct page **plist = &umem->page_chunk[i].plist[got];
-
                        rv = pin_user_pages(first_page_va, nents, foll_flags,
                                            plist, NULL);
                        if (rv < 0)
                                goto out_sem_up;
 
                        umem->num_pages += rv;
-                       atomic64_add(rv, &mm_s->pinned_vm);
                        first_page_va += rv * PAGE_SIZE;
+                       plist += rv;
                        nents -= rv;
-                       got += rv;
+                       num_pages -= rv;
                }
-               num_pages -= got;
        }
 out_sem_up:
        mmap_read_unlock(mm_s);
@@ -442,6 +439,10 @@ out_sem_up:
        if (rv > 0)
                return umem;
 
+       /* Adjust accounting for pages not pinned */
+       if (num_pages)
+               atomic64_sub(num_pages, &mm_s->pinned_vm);
+
        siw_umem_release(umem, false);
 
        return ERR_PTR(rv);
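
The accounting pattern the patch moves to can be modelled outside the kernel:
reserve the full page count against the limit with a single atomic add-and-test,
then subtract whatever was never actually pinned on the error path. The sketch
below is a hypothetical userspace analogue in plain C11, not driver code;
pinned_vm mirrors the driver's counter, while mlock_limit, try_pin_pages() and
pin_range() are made-up stand-ins, and the concrete limit and chunk size are
arbitrary.

/*
 * Hypothetical userspace model of the accounting pattern adopted above:
 * reserve the full page count with one atomic add-and-test, then give
 * back whatever was never pinned on the error path.  Not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t pinned_vm;         /* pages currently accounted */
static const int64_t mlock_limit = 1024;  /* stand-in for RLIMIT_MEMLOCK in pages */

/* Pretend to pin pages; may pin fewer than asked, like pin_user_pages(). */
static int64_t try_pin_pages(int64_t want)
{
	return want > 16 ? 16 : want;
}

static bool pin_range(int64_t num_pages)
{
	/*
	 * One atomic add-and-check: two racing callers cannot both pass a
	 * read-then-compare test and overshoot the limit together.
	 */
	if (atomic_fetch_add(&pinned_vm, num_pages) + num_pages > mlock_limit)
		goto fail;

	while (num_pages) {
		int64_t got = try_pin_pages(num_pages);

		if (got <= 0)
			goto fail;
		num_pages -= got;         /* only the remainder is still owed */
	}
	return true;

fail:
	/* Adjust accounting for pages reserved but never pinned. */
	if (num_pages)
		atomic_fetch_sub(&pinned_vm, num_pages);
	return false;
}

int main(void)
{
	bool ok = pin_range(32);

	printf("pin 32 pages: %s, pinned_vm now %lld\n",
	       ok ? "ok" : "fail", (long long)atomic_load(&pinned_vm));
	return 0;
}

The patch applies the same rollback: instead of adding to mm_s->pinned_vm chunk
by chunk as pages are pinned, the driver now reserves everything up front with
atomic64_add_return() and gives back the unpinned remainder with a single
atomic64_sub() after mmap_read_unlock().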