
mm: move init_mem_debugging_and_hardening() to mm/mm_init.c
author    Mike Rapoport (IBM) <rppt@kernel.org>
          Tue, 21 Mar 2023 17:05:08 +0000 (19:05 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
          Thu, 6 Apr 2023 02:42:54 +0000 (19:42 -0700)
init_mem_debugging_and_hardening() is only called from mm_core_init().

Move it close to the caller, make it static and rename it to
mem_debugging_and_hardening_init() for consistency with the surrounding
naming convention.

Link: https://lkml.kernel.org/r/20230321170513.2401534-10-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/internal.h
mm/mm_init.c
mm/page_alloc.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index dfff8bc..0fa6a6f 100644
@@ -3251,7 +3251,6 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
                                   unsigned long address, unsigned long size,
                                   pte_fn_t fn, void *data);
 
-extern void __init init_mem_debugging_and_hardening(void);
 #ifdef CONFIG_PAGE_POISONING
 extern void __kernel_poison_pages(struct page *page, int numpages);
 extern void __kernel_unpoison_pages(struct page *page, int numpages);
diff --git a/mm/internal.h b/mm/internal.h
index 22f1410..a2934c6 100644
@@ -204,6 +204,14 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 
 extern char * const zone_names[MAX_NR_ZONES];
 
+/* perform sanity checks on struct pages being allocated or freed */
+DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
+
+static inline bool is_check_pages_enabled(void)
+{
+       return static_branch_unlikely(&check_pages_enabled);
+}
+
 /*
  * Structure for holding the mostly immutable allocation parameters passed
  * between functions involved in allocations, including the alloc_pages*
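
The new is_check_pages_enabled() helper lets mm/ internal code gate struct page sanity checks behind a single static key rather than an #ifdef or a runtime flag. A minimal sketch of a caller (illustrative only, not part of this patch; free_page_is_bad() stands in for whatever per-page validation the caller performs):

	static int sanity_check_on_free(struct page *page)
	{
		/* Patched-out branch while the static key is disabled. */
		if (!is_check_pages_enabled())
			return 0;

		return free_page_is_bad(page) ? 1 : 0;
	}

With CONFIG_DEBUG_VM the key is defined as default-true by DEFINE_STATIC_KEY_MAYBE(); otherwise it stays off unless mem_debugging_and_hardening_init() below enables it.
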
diff --git a/mm/mm_init.c b/mm/mm_init.c
index f147541..43f6d3e 100644
@@ -2531,6 +2531,95 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
        __free_pages_core(page, order);
 }
 
+static bool _init_on_alloc_enabled_early __read_mostly
+                               = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
+static int __init early_init_on_alloc(char *buf)
+{
+
+       return kstrtobool(buf, &_init_on_alloc_enabled_early);
+}
+early_param("init_on_alloc", early_init_on_alloc);
+
+static bool _init_on_free_enabled_early __read_mostly
+                               = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
+static int __init early_init_on_free(char *buf)
+{
+       return kstrtobool(buf, &_init_on_free_enabled_early);
+}
+early_param("init_on_free", early_init_on_free);
+
+DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
+
+/*
+ * Enable static keys related to various memory debugging and hardening options.
+ * Some override others, and depend on early params that are evaluated in the
+ * order of appearance. So we need to first gather the full picture of what was
+ * enabled, and then make decisions.
+ */
+static void __init mem_debugging_and_hardening_init(void)
+{
+       bool page_poisoning_requested = false;
+       bool want_check_pages = false;
+
+#ifdef CONFIG_PAGE_POISONING
+       /*
+        * Page poisoning is debug page alloc for some arches. If
+        * either of those options are enabled, enable poisoning.
+        */
+       if (page_poisoning_enabled() ||
+            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+             debug_pagealloc_enabled())) {
+               static_branch_enable(&_page_poisoning_enabled);
+               page_poisoning_requested = true;
+               want_check_pages = true;
+       }
+#endif
+
+       if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
+           page_poisoning_requested) {
+               pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
+                       "will take precedence over init_on_alloc and init_on_free\n");
+               _init_on_alloc_enabled_early = false;
+               _init_on_free_enabled_early = false;
+       }
+
+       if (_init_on_alloc_enabled_early) {
+               want_check_pages = true;
+               static_branch_enable(&init_on_alloc);
+       } else {
+               static_branch_disable(&init_on_alloc);
+       }
+
+       if (_init_on_free_enabled_early) {
+               want_check_pages = true;
+               static_branch_enable(&init_on_free);
+       } else {
+               static_branch_disable(&init_on_free);
+       }
+
+       if (IS_ENABLED(CONFIG_KMSAN) &&
+           (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
+               pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       if (debug_pagealloc_enabled()) {
+               want_check_pages = true;
+               static_branch_enable(&_debug_pagealloc_enabled);
+
+               if (debug_guardpage_minorder())
+                       static_branch_enable(&_debug_guardpage_enabled);
+       }
+#endif
+
+       /*
+        * Any page debugging or hardening option also enables sanity checking
+        * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
+        * enabled already.
+        */
+       if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
+               static_branch_enable(&check_pages_enabled);
+}
+
 /* Report memory auto-initialization states for this boot. */
 static void __init report_meminit(void)
 {
@@ -2570,7 +2659,7 @@ void __init mm_core_init(void)
         * bigger than MAX_ORDER unless SPARSEMEM.
         */
        page_ext_init_flatmem();
-       init_mem_debugging_and_hardening();
+       mem_debugging_and_hardening_init();
        kfence_alloc_pool();
        report_meminit();
        kmsan_init_shadow();
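
As a worked example of the precedence handling above (a sketch assuming a kernel built with CONFIG_PAGE_POISONING=y and without CONFIG_DEBUG_VM), booting with:

	page_poison=on init_on_alloc=1 init_on_free=1

makes mem_debugging_and_hardening_init() enable _page_poisoning_enabled, print the "will take precedence over init_on_alloc and init_on_free" notice, and clear both _init_on_*_enabled_early flags, so init_on_alloc and init_on_free stay disabled; check_pages_enabled is then switched on because want_check_pages was set by the poisoning branch.
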
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1131b87..94bf3b7 100644
@@ -240,31 +240,6 @@ EXPORT_SYMBOL(init_on_alloc);
 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 EXPORT_SYMBOL(init_on_free);
 
-/* perform sanity checks on struct pages being allocated or freed */
-static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
-
-static inline bool is_check_pages_enabled(void)
-{
-       return static_branch_unlikely(&check_pages_enabled);
-}
-
-static bool _init_on_alloc_enabled_early __read_mostly
-                               = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
-static int __init early_init_on_alloc(char *buf)
-{
-
-       return kstrtobool(buf, &_init_on_alloc_enabled_early);
-}
-early_param("init_on_alloc", early_init_on_alloc);
-
-static bool _init_on_free_enabled_early __read_mostly
-                               = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
-static int __init early_init_on_free(char *buf)
-{
-       return kstrtobool(buf, &_init_on_free_enabled_early);
-}
-early_param("init_on_free", early_init_on_free);
-
 /*
  * A cached value of the page's pageblock's migratetype, used when the page is
  * put on a pcplist. Used to avoid the pageblock migratetype lookup when
@@ -798,76 +773,6 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
 #endif
 
-/*
- * Enable static keys related to various memory debugging and hardening options.
- * Some override others, and depend on early params that are evaluated in the
- * order of appearance. So we need to first gather the full picture of what was
- * enabled, and then make decisions.
- */
-void __init init_mem_debugging_and_hardening(void)
-{
-       bool page_poisoning_requested = false;
-       bool want_check_pages = false;
-
-#ifdef CONFIG_PAGE_POISONING
-       /*
-        * Page poisoning is debug page alloc for some arches. If
-        * either of those options are enabled, enable poisoning.
-        */
-       if (page_poisoning_enabled() ||
-            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
-             debug_pagealloc_enabled())) {
-               static_branch_enable(&_page_poisoning_enabled);
-               page_poisoning_requested = true;
-               want_check_pages = true;
-       }
-#endif
-
-       if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
-           page_poisoning_requested) {
-               pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
-                       "will take precedence over init_on_alloc and init_on_free\n");
-               _init_on_alloc_enabled_early = false;
-               _init_on_free_enabled_early = false;
-       }
-
-       if (_init_on_alloc_enabled_early) {
-               want_check_pages = true;
-               static_branch_enable(&init_on_alloc);
-       } else {
-               static_branch_disable(&init_on_alloc);
-       }
-
-       if (_init_on_free_enabled_early) {
-               want_check_pages = true;
-               static_branch_enable(&init_on_free);
-       } else {
-               static_branch_disable(&init_on_free);
-       }
-
-       if (IS_ENABLED(CONFIG_KMSAN) &&
-           (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
-               pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       if (debug_pagealloc_enabled()) {
-               want_check_pages = true;
-               static_branch_enable(&_debug_pagealloc_enabled);
-
-               if (debug_guardpage_minorder())
-                       static_branch_enable(&_debug_guardpage_enabled);
-       }
-#endif
-
-       /*
-        * Any page debugging or hardening option also enables sanity checking
-        * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's
-        * enabled already.
-        */
-       if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages)
-               static_branch_enable(&check_pages_enabled);
-}
-
 static inline void set_buddy_order(struct page *page, unsigned int order)
 {
        set_page_private(page, order);