diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8b17dcb..4524598 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -284,17 +284,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 #endif
 };
 
-static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
-       [NULL_COMPOUND_DTOR] = NULL,
-       [COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_HUGETLB_PAGE
-       [HUGETLB_PAGE_DTOR] = free_huge_page,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
-#endif
-};
-
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
 static int watermark_boost_factor __read_mostly = 15000;
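
The table deleted above is the classic enum-indexed destructor dispatch: an array of function pointers filled with designated initializers, with config-dependent slots. A minimal user-space sketch of the same pattern (names are illustrative, not kernel API):

	#include <stdio.h>

	enum dtor_id { DTOR_NONE, DTOR_PLAIN, NR_DTORS };

	static void plain_dtor(void *obj) { printf("freeing %p\n", obj); }

	/* NULL slots are legal; callers must pass a valid id. */
	static void (* const dtors[NR_DTORS])(void *) = {
		[DTOR_NONE]  = NULL,
		[DTOR_PLAIN] = plain_dtor,
	};

	static void destroy(void *obj, enum dtor_id id) { dtors[id](obj); }

The rest of this diff replaces that indirect dispatch with explicit folio-type checks in destroy_large_folio().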
@@ -371,10 +360,16 @@ static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 }
 
-static __always_inline
-unsigned long __get_pfnblock_flags_mask(const struct page *page,
-                                       unsigned long pfn,
-                                       unsigned long mask)
+/**
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * @page: The page within the block of interest
+ * @pfn: The target page frame number
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
+ */
+unsigned long get_pfnblock_flags_mask(const struct page *page,
+                                       unsigned long pfn, unsigned long mask)
 {
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
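
The kerneldoc now sits on the real implementation rather than on a wrapper (the wrapper is deleted in the next hunk). For readers following the bitmap arithmetic: each pageblock owns NR_PAGEBLOCK_BITS consecutive bits, so the bit index is (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS, and the containing word is loaded once so the masked extract cannot tear. A standalone sketch of that arithmetic (the constants are stand-ins, not the kernel's):

	#define PB_ORDER   9	/* stand-in for pageblock_order */
	#define NR_PB_BITS 4	/* stand-in for NR_PAGEBLOCK_BITS */
	#define BPL        (8 * sizeof(unsigned long))	/* BITS_PER_LONG */

	static unsigned long pfnblock_flags(const unsigned long *bitmap,
					    unsigned long pfn, unsigned long mask)
	{
		unsigned long bitidx = (pfn >> PB_ORDER) * NR_PB_BITS;
		unsigned long word = bitmap[bitidx / BPL];	/* single load */

		return (word >> (bitidx % BPL)) & mask;
	}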
@@ -393,24 +388,10 @@ unsigned long __get_pfnblock_flags_mask(const struct page *page,
        return (word >> bitidx) & mask;
 }
 
-/**
- * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
- * @page: The page within the block of interest
- * @pfn: The target page frame number
- * @mask: mask of bits that the caller is interested in
- *
- * Return: pageblock_bits flags
- */
-unsigned long get_pfnblock_flags_mask(const struct page *page,
-                                       unsigned long pfn, unsigned long mask)
-{
-       return __get_pfnblock_flags_mask(page, pfn, mask);
-}
-
 static __always_inline int get_pfnblock_migratetype(const struct page *page,
                                        unsigned long pfn)
 {
-       return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
+       return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
 }
 
 /**
@@ -538,8 +519,6 @@ out:
 
 static inline unsigned int order_to_pindex(int migratetype, int order)
 {
-       int base = order;
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        if (order > PAGE_ALLOC_COSTLY_ORDER) {
                VM_BUG_ON(order != pageblock_order);
@@ -549,7 +528,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order)
        VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
 #endif
 
-       return (MIGRATE_PCPTYPES * base) + migratetype;
+       return (MIGRATE_PCPTYPES * order) + migratetype;
 }
 
 static inline int pindex_to_order(unsigned int pindex)
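
With the redundant base variable gone, the mapping is plainly pindex = MIGRATE_PCPTYPES * order + migratetype: the per-cpu free lists are laid out as MIGRATE_PCPTYPES lists per order. A sketch of the mapping and its inverse, ignoring the THP special case above (the MIGRATE_PCPTYPES value is assumed):

	#define MIGRATE_PCPTYPES 3	/* assumed; see mmzone.h for the real value */

	static unsigned int to_pindex(int migratetype, int order)
	{
		return MIGRATE_PCPTYPES * order + migratetype;	/* (mt 1, order 2) -> 7 */
	}

	static int to_order(unsigned int pindex)
	{
		return (int)(pindex / MIGRATE_PCPTYPES);	/* 7 -> order 2 */
	}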
@@ -593,19 +572,10 @@ static inline void free_the_page(struct page *page, unsigned int order)
  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits point to the head page.
  *
- * The first tail page's ->compound_dtor holds the offset in array of compound
- * page destructors. See compound_page_dtors.
- *
  * The first tail page's ->compound_order holds the order of allocation.
  * This usage means that zero-order pages may not be compound.
  */
 
-void free_compound_page(struct page *page)
-{
-       mem_cgroup_uncharge(page_folio(page));
-       free_the_page(page, compound_order(page));
-}
-
 void prep_compound_page(struct page *page, unsigned int order)
 {
        int i;
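
The comment above describes the tail-page encoding this area relies on: bit 0 of page->compound_head marks a tail page, and the remaining bits hold the head-page pointer. A sketch of the decode step, mirroring the kernel's compound_head() helper (the struct here is a stand-in, not the real struct page):

	struct page { unsigned long compound_head; /* ... */ };

	static struct page *head_of(const struct page *page)
	{
		unsigned long head = page->compound_head;  /* READ_ONCE() in the kernel */

		if (head & 1)		/* bit 0 set: this is a tail page */
			return (struct page *)(head - 1);
		return (struct page *)page;
	}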
@@ -620,10 +590,16 @@ void prep_compound_page(struct page *page, unsigned int order)
 
 void destroy_large_folio(struct folio *folio)
 {
-       enum compound_dtor_id dtor = folio->_folio_dtor;
+       if (folio_test_hugetlb(folio)) {
+               free_huge_folio(folio);
+               return;
+       }
 
-       VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
-       compound_page_dtors[dtor](&folio->page);
+       if (folio_test_large_rmappable(folio))
+               folio_undo_large_rmappable(folio);
+
+       mem_cgroup_uncharge(folio);
+       free_the_page(&folio->page, folio_order(folio));
 }
 
 static inline void set_buddy_order(struct page *page, unsigned int order)
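
Note the order of checks in the rewritten destroy_large_folio(): hugetlb folios return early into their own freeing path, and only other large folios reach the rmappable undo, the memcg uncharge, and the buddy free. For context, this function is the tail of the large-folio release path; a simplified sketch of the call shape (the real chain in this tree goes through __folio_put()):

	if (folio_put_testzero(folio) && folio_test_large(folio))
		destroy_large_folio(folio);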
@@ -823,7 +799,7 @@ static inline void __free_one_page(struct page *page,
                         * pageblock isolation could cause incorrect freepage or CMA
                         * accounting or HIGHATOMIC accounting.
                         */
-                       int buddy_mt = get_pageblock_migratetype(buddy);
+                       int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
 
                        if (migratetype != buddy_mt
                                        && (!migratetype_is_mergeable(migratetype) ||
@@ -899,7 +875,7 @@ int split_free_page(struct page *free_page,
                goto out;
        }
 
-       mt = get_pageblock_migratetype(free_page);
+       mt = get_pfnblock_migratetype(free_page, free_page_pfn);
        if (likely(!is_migrate_isolate(mt)))
                __mod_zone_freepage_state(zone, -(1UL << order), mt);
 
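
Both conversions above are pure call-site optimizations: get_pageblock_migratetype(page) expands to a lookup that recomputes page_to_pfn(page), while these paths already have the pfn in hand. In effect:

	/* get_pageblock_migratetype(page) is equivalent to: */
	mt = get_pfnblock_migratetype(page, page_to_pfn(page));

so passing buddy_pfn and free_page_pfn just skips a redundant page-to-pfn conversion on hot paths.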
@@ -1131,7 +1107,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
                VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
 
                if (compound)
-                       ClearPageHasHWPoisoned(page);
+                       page[1].flags &= ~PAGE_FLAGS_SECOND;
                for (i = 1; i < (1 << order); i++) {
                        if (compound)
                                bad += free_tail_page_prepare(page, page + i);
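
The HWPoison flag of a compound page is stored on the first tail page (page[1]), and PAGE_FLAGS_SECOND is meant to cover everything encoded in that flags word (the folio order byte plus flags such as has_hwpoisoned), so one masked store wipes all of that second-page state as the compound page is torn down:

	page[1].flags &= ~PAGE_FLAGS_SECOND;	/* clear all second-page state at once */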
@@ -1209,8 +1185,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                                        int pindex)
 {
        unsigned long flags;
-       int min_pindex = 0;
-       int max_pindex = NR_PCP_LISTS - 1;
        unsigned int order;
        bool isolated_pageblocks;
        struct page *page;
@@ -1233,17 +1207,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 
                /* Remove pages from lists in a round-robin fashion. */
                do {
-                       if (++pindex > max_pindex)
-                               pindex = min_pindex;
+                       if (++pindex > NR_PCP_LISTS - 1)
+                               pindex = 0;
                        list = &pcp->lists[pindex];
-                       if (!list_empty(list))
-                               break;
-
-                       if (pindex == max_pindex)
-                               max_pindex--;
-                       if (pindex == min_pindex)
-                               min_pindex++;
-               } while (1);
+               } while (list_empty(list));
 
                order = pindex_to_order(pindex);
                nr_pages = 1 << order;
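
The old scan kept a shrinking [min_pindex, max_pindex] window so later passes skipped lists already seen empty; the new loop is a bare round-robin that terminates only because the caller guarantees count pages remain on the lists. A self-contained sketch of the loop's shape (types and size are stand-ins):

	#include <stdbool.h>

	#define NR_LISTS 12	/* stand-in for NR_PCP_LISTS */

	/* Advance to the next non-empty list, wrapping at the end. Only safe
	 * when at least one list is known to be non-empty. */
	static int next_nonempty(const bool *empty, int pindex)
	{
		do {
			if (++pindex > NR_LISTS - 1)
				pindex = 0;
		} while (empty[pindex]);

		return pindex;
	}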
@@ -1910,8 +1877,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
  * Reserve a pageblock for exclusive use of high-order atomic allocations if
  * there are no empty page blocks that contain a page with a suitable order
  */
-static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
-                               unsigned int alloc_order)
+static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 {
        int mt;
        unsigned long max_managed, flags;
@@ -2351,10 +2317,10 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
        return true;
 }
 
-static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch,
-                      bool free_high)
+static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
 {
        int min_nr_free, max_nr_free;
+       int batch = READ_ONCE(pcp->batch);
 
        /* Free everything if batch freeing high-order pages. */
        if (unlikely(free_high))
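
Folding the READ_ONCE(pcp->batch) into nr_pcp_free() moves the racy read next to its only consumer: pcp->batch can be rewritten concurrently (pageset retuning on memory hotplug or via sysctl), so it is sampled once into a local and every later decision sees the same snapshot:

	int batch = READ_ONCE(pcp->batch);	/* sample once; later uses can't
						 * observe two different values */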
@@ -2421,9 +2387,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 
        high = nr_pcp_high(pcp, zone, free_high);
        if (pcp->count >= high) {
-               int batch = READ_ONCE(pcp->batch);
-
-               free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex);
+               free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
        }
 }
 
@@ -3223,7 +3187,7 @@ try_this_zone:
                         * if the pageblock should be reserved for the future
                         */
                        if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
-                               reserve_highatomic_pageblock(page, zone, order);
+                               reserve_highatomic_pageblock(page, zone);
 
                        return page;
                } else {
@@ -4506,10 +4470,11 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 {
        struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
                        preferred_nid, nodemask);
+       struct folio *folio = (struct folio *)page;
 
-       if (page && order > 1)
-               prep_transhuge_page(page);
-       return (struct folio *)page;
+       if (folio && order > 1)
+               folio_prep_large_rmappable(folio);
+       return folio;
 }
 EXPORT_SYMBOL(__folio_alloc);
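
Callers are unaffected: the cast to struct folio * now simply happens before the NULL check, and order > 1 folios get the large-rmappable prep (deferred-split bookkeeping) instead of the old prep_transhuge_page(). A hedged usage sketch (GFP flags, order, and node are illustrative):

	struct folio *folio = __folio_alloc(GFP_KERNEL, 2, numa_node_id(), NULL);

	if (folio)
		folio_put(folio);	/* drop the reference we were handed */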