diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4785a8a..d047bf7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -694,34 +694,27 @@ void prep_compound_page(struct page *page, unsigned int order)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+                       = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 
 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 
 static int __init early_debug_pagealloc(char *buf)
 {
-       bool enable = false;
-
-       if (kstrtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable)
-               static_branch_enable(&_debug_pagealloc_enabled);
-
-       return 0;
+       return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
        if (!debug_pagealloc_enabled())
                return;
 
+       static_branch_enable(&_debug_pagealloc_enabled);
+
        if (!debug_guardpage_minorder())
                return;
 
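The hunk above is the core of the change: an early_param() callback runs long before jump_label_init(), so flipping a static key from early_debug_pagealloc() is too early to be reliable. The patch therefore parses the option into a plain bool (_debug_pagealloc_enabled_early) and enables the static key later, in init_debug_pagealloc(). A minimal userspace analogue of this two-stage pattern (hypothetical names, not the kernel's code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool debug_enabled_early;   /* plain flag: safe to set any time */
    static bool debug_enabled_fast;    /* stands in for the static key     */

    /* Stage 1: record the request; too early to patch any fast path. */
    static void early_param_debug(const char *arg)
    {
            debug_enabled_early = (strcmp(arg, "on") == 0);
    }

    /* Stage 2: once the "patching" machinery is ready, commit the flag. */
    static void init_debug(void)
    {
            if (debug_enabled_early)
                    debug_enabled_fast = true;
    }

    int main(void)
    {
            early_param_debug("on");   /* models early_param() parsing  */
            init_debug();              /* models init_debug_pagealloc() */
            printf("debug: %s\n", debug_enabled_fast ? "on" : "off");
            return 0;
    }
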
@@ -1186,7 +1179,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         */
        arch_free_page(page, order);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 0);
 
        kasan_free_nondeferred_pages(page, order);
@@ -1207,7 +1200,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_check(page);
        else
                return false;
@@ -1221,7 +1214,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
  */
 static bool free_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_prepare(page, 0, true);
        else
                return free_pages_prepare(page, 0, false);
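This hunk and the remaining debug_pagealloc_enabled() -> debug_pagealloc_enabled_static() substitutions below all make the same swap: hot paths now test the static key, while the plain-bool accessor remains available for early callers. A reconstruction of the accessor pair the companion include/linux/mm.h change is expected to provide (inferred from the symbols exported above; not part of this file's diff):

    extern bool _debug_pagealloc_enabled_early;
    DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

    /* Safe at any time: reads the plain bool set by the early param. */
    static inline bool debug_pagealloc_enabled(void)
    {
            return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
                   _debug_pagealloc_enabled_early;
    }

    /* Fast-path variant: valid only after init_debug_pagealloc() has
     * enabled the static key (or where a false negative is harmless). */
    static inline bool debug_pagealloc_enabled_static(void)
    {
            if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                    return false;
            return static_branch_unlikely(&_debug_pagealloc_enabled);
    }
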
@@ -1973,10 +1966,6 @@ void __init page_alloc_init_late(void)
 
        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       init_debug_guardpage();
-#endif
 }
 
 #ifdef CONFIG_CMA
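With init_debug_guardpage() folded into the renamed init_debug_pagealloc(), page_alloc_init_late() no longer takes part in debug_pagealloc setup; the initializer has to run earlier instead. A sketch of where the companion change presumably hooks it in (assumed, not visible in this file; mm_init() in init/main.c in the actual tree):

    static void __init mm_init(void)
    {
            init_debug_pagealloc();  /* enable the static key before any
                                        fast path can observe it */
            /* ... remainder of mm_init() unchanged ... */
    }
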
@@ -2106,7 +2095,7 @@ static inline bool free_pages_prezeroed(void)
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2128,7 +2117,7 @@ static inline bool check_pcp_refill(struct page *page)
 }
 static inline bool check_new_pcp(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2155,7 +2144,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        set_page_refcounted(page);
 
        arch_alloc_page(page, order);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);
        kernel_poison_pages(page, 1 << order, 1);
@@ -4476,8 +4465,11 @@ retry_cpuset:
                if (page)
                        goto got_pg;
 
-               if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
-                   !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
+               /*
+                * Checks for costly allocations with __GFP_NORETRY, which
+                * includes some THP page fault allocations
+                */
+               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
                        /*
                         * If allocating entire pageblock(s) and compaction
                         * failed because all zones are below low watermarks
@@ -4498,23 +4490,6 @@ retry_cpuset:
                        if (compact_result == COMPACT_SKIPPED ||
                            compact_result == COMPACT_DEFERRED)
                                goto nopage;
-               }
-
-               /*
-                * Checks for costly allocations with __GFP_NORETRY, which
-                * includes THP page fault allocations
-                */
-               if (costly_order && (gfp_mask & __GFP_NORETRY)) {
-                       /*
-                        * If compaction is deferred for high-order allocations,
-                        * it is because sync compaction recently failed. If
-                        * this is the case and the caller requested a THP
-                        * allocation, we do not want to heavily disrupt the
-                        * system, so we fail the allocation instead of entering
-                        * direct reclaim.
-                        */
-                       if (compact_result == COMPACT_DEFERRED)
-                               goto nopage;
 
                        /*
                         * Looks like reclaim/compaction is worth trying, but
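Taken together, these last two hunks move the fail-fast check for costly __GFP_NORETRY allocations (typically THP faults) back to directly after the first compaction attempt, replacing the removed __GFP_IO / !__GFP_RETRY_MAYFAIL test. A runnable sketch of the resulting decision (simplified, hypothetical helper; the real logic sits inline in __alloc_pages_slowpath()):

    #include <stdbool.h>
    #include <stdio.h>

    enum compact_result { COMPACT_SKIPPED, COMPACT_DEFERRED, COMPACT_CONTINUE };

    /* After the first (async) compaction attempt: a costly allocation that
     * asked not to retry gives up when compaction was skipped (not enough
     * free base pages) or deferred (it recently failed), instead of
     * entering potentially expensive direct reclaim. */
    static bool should_fail_fast(bool costly_order, bool gfp_noretry,
                                 enum compact_result res)
    {
            if (!costly_order || !gfp_noretry)
                    return false;
            return res == COMPACT_SKIPPED || res == COMPACT_DEFERRED;
    }

    int main(void)
    {
            /* THP-style fault: costly order, __GFP_NORETRY set, compaction
             * deferred -> "nopage" rather than heavy reclaim. */
            printf("%s\n", should_fail_fast(true, true, COMPACT_DEFERRED)
                           ? "nopage" : "keep trying");
            return 0;
    }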