hugetlb: allow to free gigantic pages regardless of the configuration
author    Alexandre Ghiti <alex@ghiti.fr>
          Tue, 14 May 2019 00:19:04 +0000 (17:19 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 14 May 2019 16:47:47 +0000 (09:47 -0700)
On systems that support gigantic pages but do not have CONTIG_ALLOC
enabled, boot-time reserved gigantic pages cannot be freed at all.  This
patch makes it possible to hand those pages back to the memory
allocator.
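
For instance (an illustrative sketch, not part of this patch, assuming two
1GB gigantic pages were reserved at boot with "hugepagesz=1G hugepages=2"),
the boot-time pages can now be handed back through the usual sysfs knob
even when CONFIG_CONTIG_ALLOC is not set, while growing the pool at runtime
still fails:

  # hand both boot-time 1GB pages back to the page allocator
  echo 0 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
  # runtime allocation still needs CONTIG_ALLOC; without it this write
  # fails with -EINVAL
  echo 2 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages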

Link: http://lkml.kernel.org/r/20190327063626.18421-5-alex@ghiti.fr
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Acked-by: David S. Miller <davem@davemloft.net> [sparc]
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
14 files changed:
arch/arm64/Kconfig
arch/arm64/include/asm/hugetlb.h
arch/powerpc/include/asm/book3s/64/hugetlb.h
arch/powerpc/platforms/Kconfig.cputype
arch/s390/Kconfig
arch/s390/include/asm/hugetlb.h
arch/sh/Kconfig
arch/sparc/Kconfig
arch/x86/Kconfig
arch/x86/include/asm/hugetlb.h
include/asm-generic/hugetlb.h
include/linux/gfp.h
mm/hugetlb.c
mm/page_alloc.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7f7fbd8..7a1aa53 100644
@@ -19,7 +19,7 @@ config ARM64
        select ARCH_HAS_FAST_MULTIPLIER
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
-       select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+       select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_KCOV
        select ARCH_HAS_KEEPINITRD
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index c6a07a3..4aad638 100644
@@ -70,8 +70,4 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #include <asm-generic/hugetlb.h>
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* __ASM_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 56140d1..12e150e 100644
@@ -36,8 +36,8 @@ static inline int hstate_get_psize(struct hstate *hstate)
        }
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void)
+#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
 {
        /*
         * We used gigantic page reservation with hypervisor assist in some case.
@@ -49,7 +49,6 @@ static inline bool gigantic_page_supported(void)
 
        return true;
 }
-#endif
 
 /* hugepd entry valid bit */
 #define HUGEPD_VAL_BITS                (0x8000000000000000UL)
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 3a31d42..2794235 100644
@@ -331,7 +331,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
 config PPC_RADIX_MMU
        bool "Radix MMU Support"
        depends on PPC_BOOK3S_64 && HUGETLB_PAGE
-       select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+       select ARCH_HAS_GIGANTIC_PAGE
        select PPC_HAVE_KUEP
        select PPC_HAVE_KUAP
        default y
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 724dbc6..d0c046a 100644
@@ -63,7 +63,7 @@ config S390
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
-       select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+       select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_KCOV
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SET_MEMORY
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 2d1afa5..bb59dd9 100644
@@ -116,7 +116,9 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
        return pte_modify(pte, newprot);
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
+static inline bool gigantic_page_runtime_supported(void)
+{
+       return true;
+}
+
 #endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2a5ec64..2a77033 100644
@@ -53,7 +53,7 @@ config SUPERH
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_NMI
        select NEED_SG_DMA_LENGTH
-       select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+       select ARCH_HAS_GIGANTIC_PAGE
 
        help
          The SuperH is a RISC processor targeted for use in embedded systems
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 566de73..7c93f31 100644
@@ -92,7 +92,7 @@ config SPARC64
        select ARCH_CLOCKSOURCE_DATA
        select ARCH_HAS_PTE_SPECIAL
        select PCI_DOMAINS if PCI
-       select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+       select ARCH_HAS_GIGANTIC_PAGE
 
 config ARCH_DEFCONFIG
        string
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 526d95a..f21bc56 100644
@@ -22,7 +22,7 @@ config X86_64
        def_bool y
        depends on 64BIT
        # Options that are inherently 64-bit kernel only:
-       select ARCH_HAS_GIGANTIC_PAGE if CONTIG_ALLOC
+       select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_SUPPORTS_INT128
        select ARCH_USE_CMPXCHG_LOCKREF
        select HAVE_ARCH_SOFT_DIRTY
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 7469d32..f65cfb4 100644
@@ -17,8 +17,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 {
 }
 
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
 #endif /* _ASM_X86_HUGETLB_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 71d7b77..822f433 100644
@@ -126,4 +126,11 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
 }
 #endif
 
+#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
+{
+       return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
+}
+#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */
+
 #endif /* _ASM_GENERIC_HUGETLB_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e77ab30..fb07b50 100644
@@ -589,8 +589,8 @@ static inline bool pm_suspended_storage(void)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
                              unsigned migratetype, gfp_t gfp_mask);
-extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
 #endif
+void free_contig_range(unsigned long pfn, unsigned int nr_pages);
 
 #ifdef CONFIG_CMA
 /* CMA stuff */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dffe5d9..2f901a6 100644
@@ -1059,6 +1059,7 @@ static void free_gigantic_page(struct page *page, unsigned int order)
        free_contig_range(page_to_pfn(page), 1 << order);
 }
 
+#ifdef CONFIG_CONTIG_ALLOC
 static int __alloc_gigantic_page(unsigned long start_pfn,
                                unsigned long nr_pages, gfp_t gfp_mask)
 {
@@ -1143,11 +1144,20 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
+#else /* !CONFIG_CONTIG_ALLOC */
+static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
+                                       int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
+#endif /* CONFIG_CONTIG_ALLOC */
 
 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
-static inline bool gigantic_page_supported(void) { return false; }
 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
-               int nid, nodemask_t *nodemask) { return NULL; }
+                                       int nid, nodemask_t *nodemask)
+{
+       return NULL;
+}
 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
@@ -1157,7 +1167,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
 
-       if (hstate_is_gigantic(h) && !gigantic_page_supported())
+       if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                return;
 
        h->nr_huge_pages--;
@@ -2278,13 +2288,27 @@ found:
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
-                                               nodemask_t *nodes_allowed)
+static int set_max_huge_pages(struct hstate *h, unsigned long count,
+                             nodemask_t *nodes_allowed)
 {
        unsigned long min_count, ret;
 
-       if (hstate_is_gigantic(h) && !gigantic_page_supported())
-               return h->max_huge_pages;
+       spin_lock(&hugetlb_lock);
+
+       /*
+        * Gigantic page runtime allocation depends on the capability for
+        * large page range allocation.
+        * If the system does not provide this feature, return an error when
+        * the user tries to allocate gigantic pages but let the user free the
+        * boottime allocated gigantic pages.
+        */
+       if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
+               if (count > persistent_huge_pages(h)) {
+                       spin_unlock(&hugetlb_lock);
+                       return -EINVAL;
+               }
+               /* Fall through to decrease pool */
+       }
 
        /*
         * Increase the pool size
@@ -2297,7 +2321,6 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
         * pool might be one hugepage larger than it needs to be, but
         * within all the constraints specified by the sysctls.
         */
-       spin_lock(&hugetlb_lock);
        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, -1))
                        break;
@@ -2352,9 +2375,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                        break;
        }
 out:
-       ret = persistent_huge_pages(h);
+       h->max_huge_pages = persistent_huge_pages(h);
        spin_unlock(&hugetlb_lock);
-       return ret;
+
+       return 0;
 }
 
 #define HSTATE_ATTR_RO(_name) \
@@ -2406,7 +2430,7 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
        int err;
        NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
-       if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
+       if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) {
                err = -EINVAL;
                goto out;
        }
@@ -2430,15 +2454,13 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
        } else
                nodes_allowed = &node_states[N_MEMORY];
 
-       h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
+       err = set_max_huge_pages(h, count, nodes_allowed);
 
+out:
        if (nodes_allowed != &node_states[N_MEMORY])
                NODEMASK_FREE(nodes_allowed);
 
-       return len;
-out:
-       NODEMASK_FREE(nodes_allowed);
-       return err;
+       return err ? err : len;
 }
 
 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2efb652..4ea71bc 100644
@@ -8346,8 +8346,9 @@ done:
                                pfn_max_align_up(end), migratetype);
        return ret;
 }
+#endif /* CONFIG_CONTIG_ALLOC */
 
-void free_contig_range(unsigned long pfn, unsigned nr_pages)
+void free_contig_range(unsigned long pfn, unsigned int nr_pages)
 {
        unsigned int count = 0;
 
@@ -8359,7 +8360,6 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
        }
        WARN(count != 0, "%d pages are still in use!\n", count);
 }
-#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*