int i;
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
- int need_release = 0;
unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
if (access & IB_ACCESS_ON_DEMAND) {
ret = ib_umem_odp_get(context, umem, access);
- if (ret) {
- kfree(umem);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto umem_kfree;
return umem;
}
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
- kfree(umem);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto umem_kfree;
}
/*
if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
up_write(&current->mm->mmap_sem);
ret = -ENOMEM;
- goto out;
+ goto vma;
}
up_write(&current->mm->mmap_sem);
if (npages == 0 || npages > UINT_MAX) {
ret = -EINVAL;
- goto out;
+ goto vma;
}
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
if (ret)
- goto out;
+ goto vma;
if (!umem->writable)
gup_flags |= FOLL_FORCE;
- need_release = 1;
sg_list_start = umem->sg_head.sgl;
down_read(&current->mm->mmap_sem);
gup_flags, page_list, vma_list);
if (ret < 0) {
up_read(&current->mm->mmap_sem);
- goto out;
+ goto umem_release;
}
umem->npages += ret;
if (!umem->nmap) {
ret = -ENOMEM;
- goto out;
+ goto umem_release;
}
ret = 0;
+ goto out;
+umem_release:
+ __ib_umem_release(context->device, umem, 0);
+vma:
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&current->mm->mmap_sem);
out:
- if (ret < 0) {
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&current->mm->mmap_sem);
- if (need_release)
- __ib_umem_release(context->device, umem, 0);
- kfree(umem);
- }
-
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
-
- return ret < 0 ? ERR_PTR(ret) : umem;
+umem_kfree:
+ if (ret)
+ kfree(umem);
+ return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);