OSDN Git Service

mm: convert prep_transhuge_page() to folio_prep_large_rmappable()
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 16 Aug 2023 15:11:53 +0000 (16:11 +0100)
Committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 21:28:43 +0000 (14:28 -0700)
Match folio_undo_large_rmappable(), and move the casting from page to
folio into the callers (which they were largely doing anyway).

Link: https://lkml.kernel.org/r/20230816151201.3655946-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/khugepaged.c
mm/mempolicy.c
mm/page_alloc.c

index ceda26a..fa0350b 100644 (file)
@@ -140,7 +140,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);
 
-void prep_transhuge_page(struct page *page);
+void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
@@ -280,7 +280,7 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma,
        return false;
 }
 
-static inline void prep_transhuge_page(struct page *page) {}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
 
 #define transparent_hugepage_flags 0UL
 
index b334566..5817bf7 100644 (file)
@@ -577,10 +577,8 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-void prep_transhuge_page(struct page *page)
+void folio_prep_large_rmappable(struct folio *folio)
 {
-       struct folio *folio = (struct folio *)page;
-
        VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
        INIT_LIST_HEAD(&folio->_deferred_list);
        folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR);
index 9a6e0d5..40d43ec 100644 (file)
@@ -896,7 +896,7 @@ static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
                return false;
        }
 
-       prep_transhuge_page(*hpage);
+       folio_prep_large_rmappable((struct folio *)*hpage);
        count_vm_event(THP_COLLAPSE_ALLOC);
        return true;
 }
index ec2eace..42b5567 100644 (file)
@@ -2195,9 +2195,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
                mpol_cond_put(pol);
                gfp |= __GFP_COMP;
                page = alloc_page_interleave(gfp, order, nid);
-               if (page && order > 1)
-                       prep_transhuge_page(page);
                folio = (struct folio *)page;
+               if (folio && order > 1)
+                       folio_prep_large_rmappable(folio);
                goto out;
        }
 
@@ -2208,9 +2208,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
                gfp |= __GFP_COMP;
                page = alloc_pages_preferred_many(gfp, order, node, pol);
                mpol_cond_put(pol);
-               if (page && order > 1)
-                       prep_transhuge_page(page);
                folio = (struct folio *)page;
+               if (folio && order > 1)
+                       folio_prep_large_rmappable(folio);
                goto out;
        }
 
@@ -2306,10 +2306,11 @@ EXPORT_SYMBOL(alloc_pages);
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
        struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+       struct folio *folio = (struct folio *)page;
 
-       if (page && order > 1)
-               prep_transhuge_page(page);
-       return (struct folio *)page;
+       if (folio && order > 1)
+               folio_prep_large_rmappable(folio);
+       return folio;
 }
 EXPORT_SYMBOL(folio_alloc);
 
index 4047b58..a97d6fa 100644 (file)
@@ -4489,10 +4489,11 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 {
        struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
                        preferred_nid, nodemask);
+       struct folio *folio = (struct folio *)page;
 
-       if (page && order > 1)
-               prep_transhuge_page(page);
-       return (struct folio *)page;
+       if (folio && order > 1)
+               folio_prep_large_rmappable(folio);
+       return folio;
 }
 EXPORT_SYMBOL(__folio_alloc);