lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
down_write(&mm->mmap_sem);
- if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
- (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
+ new_pinned = atomic64_read(&mm->pinned_vm) + npages;
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
up_write(&mm->mmap_sem);
ret = -ENOMEM;
goto out;
}
- mm->pinned_vm = new_pinned;
+ atomic64_set(&mm->pinned_vm, new_pinned);
up_write(&mm->mmap_sem);
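Taken together, this hunk keeps mmap_sem held across the read/check/set, so the plain atomic64_read()/atomic64_set() pair cannot race. As a userspace illustration of the same accounting pattern (a minimal sketch; pinned_vm, pin_account, lock_limit, and cap_ipc_lock are hypothetical stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_int_fast64_t pinned_vm;	/* stands in for mm->pinned_vm */

/* Caller is assumed to hold the equivalent of mmap_sem, so the
 * load/compare/store sequence below is serialized, as in the hunk above. */
static bool pin_account(int64_t npages, int64_t lock_limit, bool cap_ipc_lock)
{
	int64_t new_pinned = atomic_load(&pinned_vm) + npages;

	if (new_pinned > lock_limit && !cap_ipc_lock)
		return false;		/* kernel path returns -ENOMEM */
	atomic_store(&pinned_vm, new_pinned);
	return true;
}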
cur_base = addr & PAGE_MASK;
__ib_umem_release(context->device, umem, 0);
vma:
down_write(&mm->mmap_sem);
- mm->pinned_vm -= ib_umem_num_pages(umem);
+ atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
up_write(&mm->mmap_sem);
out:
if (vma_list)
struct ib_umem *umem = container_of(work, struct ib_umem, work);
down_write(&umem->owning_mm->mmap_sem);
- umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
up_write(&umem->owning_mm->mmap_sem);
__ib_umem_release_tail(umem);
} else {
down_write(&umem->owning_mm->mmap_sem);
}
- umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
up_write(&umem->owning_mm->mmap_sem);
__ib_umem_release_tail(umem);
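The release paths are simpler: each one subtracts exactly the page count the pin path added, so no limit check is needed. Continuing the userspace sketch above (names again hypothetical):

#include <stdatomic.h>
#include <stdint.h>

static atomic_int_fast64_t pinned_vm;	/* as in the pin sketch above */

/* Undo one successful pin; must be passed the same page count that the
 * matching pin path added, or the counter underflows. */
static void unpin_account(int64_t npages)
{
	atomic_fetch_sub(&pinned_vm, npages);
}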
size = DIV_ROUND_UP(size, PAGE_SIZE);
down_read(&mm->mmap_sem);
- pinned = mm->pinned_vm;
+ pinned = atomic64_read(&mm->pinned_vm);
up_read(&mm->mmap_sem);
/* First, check the absolute limit against all pinned pages. */
return ret;
down_write(&mm->mmap_sem);
- mm->pinned_vm += ret;
+ atomic64_add(ret, &mm->pinned_vm);
up_write(&mm->mmap_sem);
return ret;
if (mm) { /* during close after signal, mm can be NULL */
down_write(&mm->mmap_sem);
- mm->pinned_vm -= npages;
+ atomic64_sub(npages, &mm->pinned_vm);
up_write(&mm->mmap_sem);
}
}
goto bail_release;
}
- current->mm->pinned_vm += num_pages;
+ atomic64_add(num_pages, &current->mm->pinned_vm);
ret = 0;
goto bail;
__qib_release_user_pages(p, num_pages, 1);
if (current->mm) {
- current->mm->pinned_vm -= num_pages;
+ atomic64_sub(num_pages, &current->mm->pinned_vm);
up_write(&current->mm->mmap_sem);
}
}
uiomr->owning_mm = mm = current->mm;
down_write(&mm->mmap_sem);
- locked = npages + current->mm->pinned_vm;
+ locked = npages + atomic64_read(&current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
else {
- mm->pinned_vm = locked;
+ atomic64_set(&mm->pinned_vm, locked);
mmgrab(uiomr->owning_mm);
}
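usnic computes the prospective total up front and only publishes it with atomic64_set() on success, which again relies on mmap_sem serializing the read and the set. If the lock were ever dropped, a read-then-set would race; a lock-free variant would instead need an atomic read-modify-write with rollback, roughly like the following sketch (an alternative technique for illustration, not what this patch does):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_int_fast64_t pinned_vm;

/* Charge first, then back out if over the limit: safe without any lock,
 * at the cost of a transient overshoot visible to concurrent readers. */
static bool pin_account_lockfree(int64_t npages, int64_t lock_limit,
				 bool cap_ipc_lock)
{
	int64_t new_pinned = atomic_fetch_add(&pinned_vm, npages) + npages;

	if (new_pinned > lock_limit && !cap_ipc_lock) {
		atomic_fetch_sub(&pinned_vm, npages);	/* roll back the charge */
		return false;
	}
	return true;
}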
container_of(work, struct usnic_uiom_reg, work);
down_write(&uiomr->owning_mm->mmap_sem);
- uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
up_write(&uiomr->owning_mm->mmap_sem);
__usnic_uiom_release_tail(uiomr);
} else {
down_write(&uiomr->owning_mm->mmap_sem);
}
- uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
up_write(&uiomr->owning_mm->mmap_sem);
__usnic_uiom_release_tail(uiomr);
} else {
down_write(&mm->mmap_sem);
}
- mm->pinned_vm -= nr_pages;
+ atomic64_sub(nr_pages, &mm->pinned_vm);
up_write(&mm->mmap_sem);
return 0;
}
return 0;
locked = nr_pages;
- locked += mm->pinned_vm;
+ locked += atomic64_read(&mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
dev_err(scif_info.mdev.this_device,
"locked(%lu) > lock_limit(%lu)\n",
locked, lock_limit);
return -ENOMEM;
}
- mm->pinned_vm = locked;
+ atomic64_set(&mm->pinned_vm, locked);
return 0;
}
SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
- SEQ_PUT_DEC(" kB\nVmPin:\t", mm->pinned_vm);
+ SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
+ atomic64_t pinned_vm; /* Refcount permanently increased */
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
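The struct change is the heart of the patch: pinned_vm becomes an atomic64_t, which is both 64-bit on every architecture (a plain unsigned long is only 32 bits on 32-bit targets) and safe to read without mmap_sem. A minimal userspace illustration of the tear-free read (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int_fast64_t pinned_vm;

int main(void)
{
	atomic_fetch_add(&pinned_vm, 123);
	/* A single, untorn 64-bit load, even on targets where a plain
	 * 64-bit read would compile to two 32-bit loads. */
	printf("VmPin:\t%lld pages\n", (long long)atomic_load(&pinned_vm));
	return 0;
}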
/* now it's safe to free the pages */
atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
- vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
+ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
/* this has to be the last one */
rb_free_aux(rb);
*/
atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
- vma->vm_mm->pinned_vm -= mmap_locked;
+ atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
free_uid(mmap_user);
out_put:
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- locked = vma->vm_mm->pinned_vm + extra;
+ locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
unlock:
if (!ret) {
atomic_long_add(user_extra, &user->locked_vm);
- vma->vm_mm->pinned_vm += extra;
+ atomic64_add(extra, &vma->vm_mm->pinned_vm);
atomic_inc(&event->mmap_count);
} else if (rb) {
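The perf mmap paths charge two counters: the per-user locked_vm (already an atomic_long_t) and the per-mm pinned_vm converted here, so mmap and munmap must add and subtract the same split. A compact sketch of that pairing (all names illustrative):

#include <stdatomic.h>
#include <stdint.h>

static atomic_long pinned_user;		/* per-user charge */
static atomic_int_fast64_t pinned_mm;	/* per-mm charge */

/* Each successful mmap_account() must be undone by one munmap_unaccount()
 * with the identical split, or one of the counters drifts. */
static void mmap_account(long user_extra, int64_t extra)
{
	atomic_fetch_add(&pinned_user, user_extra);
	atomic_fetch_add(&pinned_mm, extra);
}

static void munmap_unaccount(long user_extra, int64_t extra)
{
	atomic_fetch_sub(&pinned_user, user_extra);
	atomic_fetch_sub(&pinned_mm, extra);
}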
mm_pgtables_bytes_init(mm);
mm->map_count = 0;
mm->locked_vm = 0;
- mm->pinned_vm = 0;
+ atomic64_set(&mm->pinned_vm, 0);
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
spin_lock_init(&mm->arg_lock);
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
- "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
+ "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
mm_pgtables_bytes(mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
- mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
+ atomic64_read(&mm->pinned_vm),
+ mm->data_vm, mm->exec_vm, mm->stack_vm,
mm->start_code, mm->end_code, mm->start_data, mm->end_data,
mm->start_brk, mm->brk, mm->start_stack,
mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
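The format-string change above follows from the type change: atomic64_read() returns s64 rather than unsigned long, hence %lx widens to %llx. The same consideration applies when printing a C11 64-bit atomic in userspace, where an explicit cast keeps the specifier portable (hedged sketch, hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int_fast64_t pinned_vm;

int main(void)
{
	/* int_fast64_t need not be long long on every platform,
	 * so widen explicitly before using %llx. */
	printf("pinned_vm %llx\n",
	       (unsigned long long)atomic_load(&pinned_vm));
	return 0;
}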