}
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
}
for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
__ClearPageReserved(p);
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
return false;
}
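The two hunks above cover both gigantic-page teardown paths. With this series, compound_nr only exists in struct page on 64-bit, and set_compound_order(page, 0) would leave it at 1U << 0 == 1 rather than 0, so the explicit clear stays and gets wrapped in the matching #ifdef. A minimal sketch of the helper this relies on, assuming the include/linux/mm.h definition this series targets:

	static inline void set_compound_order(struct page *page,
					      unsigned int order)
	{
		page[1].compound_order = order;
	#ifdef CONFIG_64BIT
		page[1].compound_nr = 1U << order;
	#endif
	}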
set_page_dirty(page);
hugetlb_count_sub(pages_per_huge_page(h), mm);
- page_remove_rmap(page, true);
+ page_remove_rmap(page, vma, true);
spin_unlock(ptl);
tlb_remove_page_size(tlb, page, huge_page_size(h));
/* Break COW */
huge_ptep_clear_flush(vma, haddr, ptep);
mmu_notifier_invalidate_range(mm, range.start, range.end);
- page_remove_rmap(old_page, true);
+ page_remove_rmap(old_page, vma, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
set_huge_pte_at(mm, haddr, ptep,
make_huge_pte(vma, new_page, 1));
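The two page_remove_rmap() call sites above (the unmap path and the COW-break path) adapt to the new prototype, which takes the vma so rmap can do per-vma work such as mlock/munlock accounting internally. Assumed declaration, per include/linux/rmap.h for this series:

	void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
			      bool compound);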
pgoff_t idx,
unsigned int flags,
unsigned long haddr,
+ unsigned long addr,
unsigned long reason)
{
vm_fault_t ret;
struct vm_fault vmf = {
.vma = vma,
.address = haddr,
+ .real_address = addr,
.flags = flags,
/*
 * Hard to debug if it ends up being
 * used by a callee that assumes
 * something about the other
 * uninitialized fields... same as in
 * memory.c
 */
};
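haddr and addr are deliberately distinct here: haddr is the fault address rounded down to the huge page boundary, while addr is the unmasked address, which the new real_address field preserves for userfaultfd reporting. Illustrative one-liner showing how the caller derives haddr (hugetlb_fault computes it this way):

	unsigned long haddr = address & huge_page_mask(h);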
/* Check for page in userfault range */
if (userfaultfd_missing(vma)) {
ret = hugetlb_handle_userfault(vma, mapping, idx,
- flags, haddr,
+ flags, haddr, address,
VM_UFFD_MISSING);
goto out;
}
unlock_page(page);
put_page(page);
ret = hugetlb_handle_userfault(vma, mapping, idx,
- flags, haddr,
+ flags, haddr, address,
VM_UFFD_MINOR);
goto out;
}
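Both call sites now forward the unmasked address alongside haddr: VM_UFFD_MISSING fires when no page exists at all, VM_UFFD_MINOR when the page is in the page cache but not yet in the page tables. A hypothetical userspace monitor fragment (error handling elided; assuming UFFD_FEATURE_EXACT_ADDRESS has been negotiated, so the reported address is the exact one carried by the new addr/real_address plumbing, not the aligned haddr):

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static void handle_one_fault(int uffd, struct uffdio_copy *copy,
				     struct uffdio_continue *cont)
	{
		struct uffd_msg msg;

		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
			return;
		if (msg.event != UFFD_EVENT_PAGEFAULT)
			return;
		if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR)
			/* page cache already populated: just map it */
			ioctl(uffd, UFFDIO_CONTINUE, cont);
		else
			/* truly missing: copy the contents in */
			ioctl(uffd, UFFDIO_COPY, copy);
	}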
if (pages) {
/*
- * try_grab_compound_head() should always succeed here,
+ * try_grab_folio() should always succeed here,
* because: a) we hold the ptl lock, and b) we've just
* checked that the huge page is present in the page
* tables. If the huge page is present, then the tail
* pages must also be present. The ptl prevents the
* head page and tail pages from being rearranged in
* any way. So this page must be available at this
* point, unless the page refcount overflowed:
*/
- if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
- refs,
- flags))) {
+ if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
+ flags))) {
spin_unlock(ptl);
remainder = 0;
err = -ENOMEM;
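try_grab_folio() is the folio-based successor to try_grab_compound_head(): same job (take `refs` references on behalf of FOLL_GET/FOLL_PIN), but it returns the typed folio rather than a bare head page. Assumed prototype, per mm/gup.c of this era:

	struct folio *try_grab_folio(struct page *page, int refs,
				     unsigned int flags);

A NULL return means the references could not be taken (e.g. the refcount would overflow), which is why the caller above bails out with -ENOMEM.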