powerpc/mm: move __find_linux_pte() out of hugetlbpage.c
author Christophe Leroy <christophe.leroy@c-s.fr>
Fri, 26 Apr 2019 05:59:41 +0000 (05:59 +0000)
committer Michael Ellerman <mpe@ellerman.id.au>
Thu, 2 May 2019 15:20:23 +0000 (01:20 +1000)
__find_linux_pte() is the only function in hugetlbpage.c
which is compiled in regardless of CONFIG_HUGETLB_PAGE.

This patch moves it into pgtable.c.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable.c
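
For context, below is a minimal sketch of how a caller might use __find_linux_pte(). It only illustrates the calling convention implied by the comment above the function (interrupts disabled, optional is_thp and hpage_shift outputs); the helper name and surrounding details are illustrative, not part of this commit.

#include <linux/mm.h>

/* Illustrative only: look up the pfn backing an effective address. */
static unsigned long example_lookup_pfn(struct mm_struct *mm, unsigned long ea)
{
        unsigned long flags, pfn = 0;
        unsigned int hpage_shift;
        bool is_thp;
        pte_t *ptep;

        local_irq_save(flags);          /* __find_linux_pte() requires irqs off */
        ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &hpage_shift);
        if (ptep && pte_present(*ptep))
                pfn = pte_pfn(*ptep);   /* hpage_shift != 0 means a huge mapping */
        local_irq_restore(flags);

        return pfn;
}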

arch/powerpc/mm/hugetlbpage.c
index 5f67e7a..17915fc 100644
@@ -756,109 +756,6 @@ void flush_dcache_icache_hugepage(struct page *page)
 
 #endif /* CONFIG_HUGETLB_PAGE */
 
-/*
- * We have 4 cases for pgds and pmds:
- * (1) invalid (all zeroes)
- * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page _PAGE_PTE set
- * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
- *
- * So long as we atomically load page table pointers we are safe against teardown,
- * and we can follow the address down to the page and take a ref on it.
- * This function needs to be called with interrupts disabled. We use this variant
- * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED.
- */
-pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
-                       bool *is_thp, unsigned *hpage_shift)
-{
-       pgd_t pgd, *pgdp;
-       pud_t pud, *pudp;
-       pmd_t pmd, *pmdp;
-       pte_t *ret_pte;
-       hugepd_t *hpdp = NULL;
-       unsigned pdshift = PGDIR_SHIFT;
-
-       if (hpage_shift)
-               *hpage_shift = 0;
-
-       if (is_thp)
-               *is_thp = false;
-
-       pgdp = pgdir + pgd_index(ea);
-       pgd  = READ_ONCE(*pgdp);
-       /*
-        * Always operate on the local stack value. This makes sure the
-        * value doesn't get updated by a parallel THP split/collapse,
-        * page fault or a page unmap. The returned pte_t * is still not
-        * stable, so the caller must recheck it for the above conditions.
-        */
-       if (pgd_none(pgd))
-               return NULL;
-       else if (pgd_huge(pgd)) {
-               ret_pte = (pte_t *) pgdp;
-               goto out;
-       } else if (is_hugepd(__hugepd(pgd_val(pgd))))
-               hpdp = (hugepd_t *)&pgd;
-       else {
-               /*
-                * Even if we end up with an unmap, the pgtable will not
-                * be freed, because we do an RCU free and we are running
-                * here with interrupts disabled.
-                */
-               pdshift = PUD_SHIFT;
-               pudp = pud_offset(&pgd, ea);
-               pud  = READ_ONCE(*pudp);
-
-               if (pud_none(pud))
-                       return NULL;
-               else if (pud_huge(pud)) {
-                       ret_pte = (pte_t *) pudp;
-                       goto out;
-               } else if (is_hugepd(__hugepd(pud_val(pud))))
-                       hpdp = (hugepd_t *)&pud;
-               else {
-                       pdshift = PMD_SHIFT;
-                       pmdp = pmd_offset(&pud, ea);
-                       pmd  = READ_ONCE(*pmdp);
-                       /*
-                        * A hugepage collapse is captured by pmd_none, because
-                        * it marks the pmd none and does an hpte invalidate.
-                        */
-                       if (pmd_none(pmd))
-                               return NULL;
-
-                       if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
-                               if (is_thp)
-                                       *is_thp = true;
-                               ret_pte = (pte_t *) pmdp;
-                               goto out;
-                       }
-                       /*
-                        * The pmd_large check below handles the swap pmd pte
-                        * case; we need to do both checks because they are
-                        * config dependent.
-                        */
-                       if (pmd_huge(pmd) || pmd_large(pmd)) {
-                               ret_pte = (pte_t *) pmdp;
-                               goto out;
-                       } else if (is_hugepd(__hugepd(pmd_val(pmd))))
-                               hpdp = (hugepd_t *)&pmd;
-                       else
-                               return pte_offset_kernel(&pmd, ea);
-               }
-       }
-       if (!hpdp)
-               return NULL;
-
-       ret_pte = hugepte_offset(*hpdp, ea, pdshift);
-       pdshift = hugepd_shift(*hpdp);
-out:
-       if (hpage_shift)
-               *hpage_shift = pdshift;
-       return ret_pte;
-}
-EXPORT_SYMBOL_GPL(__find_linux_pte);
-
 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
 {
arch/powerpc/mm/pgtable.c
index d3d61d2..9f4ccd1 100644
@@ -30,6 +30,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/hugetlb.h>
 
 static inline int is_exec_fault(void)
 {
@@ -299,3 +300,106 @@ unsigned long vmalloc_to_phys(void *va)
        return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
 }
 EXPORT_SYMBOL_GPL(vmalloc_to_phys);
+
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page _PAGE_PTE set
+ * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
+ *
+ * So long as we atomically load page table pointers we are safe against teardown,
+ * and we can follow the address down to the page and take a ref on it.
+ * This function needs to be called with interrupts disabled. We use this variant
+ * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED.
+ */
+pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+                       bool *is_thp, unsigned *hpage_shift)
+{
+       pgd_t pgd, *pgdp;
+       pud_t pud, *pudp;
+       pmd_t pmd, *pmdp;
+       pte_t *ret_pte;
+       hugepd_t *hpdp = NULL;
+       unsigned pdshift = PGDIR_SHIFT;
+
+       if (hpage_shift)
+               *hpage_shift = 0;
+
+       if (is_thp)
+               *is_thp = false;
+
+       pgdp = pgdir + pgd_index(ea);
+       pgd  = READ_ONCE(*pgdp);
+       /*
+        * Always operate on the local stack value. This makes sure the
+        * value doesn't get updated by a parallel THP split/collapse,
+        * page fault or a page unmap. The returned pte_t * is still not
+        * stable, so the caller must recheck it for the above conditions.
+        */
+       if (pgd_none(pgd))
+               return NULL;
+       else if (pgd_huge(pgd)) {
+               ret_pte = (pte_t *) pgdp;
+               goto out;
+       } else if (is_hugepd(__hugepd(pgd_val(pgd))))
+               hpdp = (hugepd_t *)&pgd;
+       else {
+               /*
+                * Even if we end up with an unmap, the pgtable will not
+                * be freed, because we do an RCU free and we are running
+                * here with interrupts disabled.
+                */
+               pdshift = PUD_SHIFT;
+               pudp = pud_offset(&pgd, ea);
+               pud  = READ_ONCE(*pudp);
+
+               if (pud_none(pud))
+                       return NULL;
+               else if (pud_huge(pud)) {
+                       ret_pte = (pte_t *) pudp;
+                       goto out;
+               } else if (is_hugepd(__hugepd(pud_val(pud))))
+                       hpdp = (hugepd_t *)&pud;
+               else {
+                       pdshift = PMD_SHIFT;
+                       pmdp = pmd_offset(&pud, ea);
+                       pmd  = READ_ONCE(*pmdp);
+                       /*
+                        * A hugepage collapse is captured by pmd_none, because
+                        * it marks the pmd none and does an hpte invalidate.
+                        */
+                       if (pmd_none(pmd))
+                               return NULL;
+
+                       if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
+                               if (is_thp)
+                                       *is_thp = true;
+                               ret_pte = (pte_t *) pmdp;
+                               goto out;
+                       }
+                       /*
+                        * The pmd_large check below handles the swap pmd pte
+                        * case; we need to do both checks because they are
+                        * config dependent.
+                        */
+                       if (pmd_huge(pmd) || pmd_large(pmd)) {
+                               ret_pte = (pte_t *) pmdp;
+                               goto out;
+                       } else if (is_hugepd(__hugepd(pmd_val(pmd))))
+                               hpdp = (hugepd_t *)&pmd;
+                       else
+                               return pte_offset_kernel(&pmd, ea);
+               }
+       }
+       if (!hpdp)
+               return NULL;
+
+       ret_pte = hugepte_offset(*hpdp, ea, pdshift);
+       pdshift = hugepd_shift(*hpdp);
+out:
+       if (hpage_shift)
+               *hpage_shift = pdshift;
+       return ret_pte;
+}
+EXPORT_SYMBOL_GPL(__find_linux_pte);
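
As a reading aid: the hpage_shift reported by __find_linux_pte() is the shift of the level (or hugepd) at which the walk stopped, and it is left at 0 when an ordinary pte is returned via pte_offset_kernel(). A hypothetical helper, not part of this commit, showing how a caller could turn that shift into a mapping size:

/* Illustrative only: size covered by the entry found by __find_linux_pte().
 * A shift of 0 means a normal PAGE_SIZE pte was returned. */
static unsigned long example_mapping_size(unsigned int hpage_shift)
{
        return hpage_shift ? (1UL << hpage_shift) : PAGE_SIZE;
}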