OSDN Git Service

mm: convert do_anonymous_page() to use a folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 16 Jan 2023 19:18:10 +0000 (19:18 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:18 +0000 (22:33 -0800)
Removes six calls to compound_head(); some inline and some external.

Link: https://lkml.kernel.org/r/20230116191813.2145215-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memory.c

index b6358ff..950e5a4 100644 (file)
@@ -3996,7 +3996,6 @@ out_release:
 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
-       struct page *page;
        struct folio *folio;
        vm_fault_t ret = 0;
        pte_t entry;
@@ -4051,19 +4050,18 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        if (!folio)
                goto oom;
 
-       page = &folio->page;
        if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
                goto oom_free_page;
-       cgroup_throttle_swaprate(page, GFP_KERNEL);
+       cgroup_throttle_swaprate(&folio->page, GFP_KERNEL);
 
        /*
-        * The memory barrier inside __SetPageUptodate makes sure that
+        * The memory barrier inside __folio_mark_uptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
-       __SetPageUptodate(page);
+       __folio_mark_uptodate(folio);
 
-       entry = mk_pte(page, vma->vm_page_prot);
+       entry = mk_pte(&folio->page, vma->vm_page_prot);
        entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4082,13 +4080,13 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        /* Deliver the page fault to userland, check inside PT lock */
        if (userfaultfd_missing(vma)) {
                pte_unmap_unlock(vmf->pte, vmf->ptl);
-               put_page(page);
+               folio_put(folio);
                return handle_userfault(vmf, VM_UFFD_MISSING);
        }
 
        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(page, vma, vmf->address);
-       lru_cache_add_inactive_or_unevictable(page, vma);
+       folio_add_new_anon_rmap(folio, vma, vmf->address);
+       folio_add_lru_vma(folio, vma);
 setpte:
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -4098,10 +4096,10 @@ unlock:
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;
 release:
-       put_page(page);
+       folio_put(folio);
        goto unlock;
 oom_free_page:
-       put_page(page);
+       folio_put(folio);
 oom:
        return VM_FAULT_OOM;
 }