
powerpc/32: Don't populate page tables for block mapped pages except on the 8xx.
author    Christophe Leroy <christophe.leroy@c-s.fr>
          Fri, 23 Aug 2019 09:56:21 +0000 (09:56 +0000)
committer Michael Ellerman <mpe@ellerman.id.au>
          Mon, 18 Nov 2019 11:27:52 +0000 (22:27 +1100)
Commit d2f15e0979ee ("powerpc/32: always populate page tables for
Abatron BDI.") wrongly sets up page tables on every PPC32 platform for
the benefit of the BDI, and doesn't update them after init (removal of
RX on the init section, marking text and rodata read-only).

Only the 8xx requires page tables to be populated for the BDI to work.
They also need to be populated in order to see the mappings in
/sys/kernel/debug/kernel_page_tables.

On BOOK3S_32, pages that are not mapped by page tables are mapped by
BATs. The BDI knows about BATs, and they can be viewed in
/sys/kernel/debug/powerpc/block_address_translation.
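
For reference, a minimal user-space sketch of how a single BAT pair can
cover a block of the linear mapping without any page table, which is why
BOOK3S_32 does not need populated page tables for those ranges. The field
layout follows the classic 32-bit PowerPC BAT registers; the addresses
and constants are illustrative only, not taken from the kernel sources.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: compute the upper/lower BAT values that would map a
 * 16MB block at virtual 0xc0000000 to physical 0x00000000, cacheable,
 * supervisor read/write.  BEPI/BL/Vs live in the upper BAT, BRPN/WIMG/PP
 * in the lower one.
 */
int main(void)
{
	uint32_t virt = 0xc0000000, phys = 0x00000000;
	uint32_t bl_16m = 0x7f;	/* block length mask for a 16MB block */
	uint32_t wimg = 0;	/* write-back, cacheable, not guarded */
	uint32_t pp_rw = 2;	/* supervisor read/write */

	uint32_t batu = (virt & 0xfffe0000) | (bl_16m << 2) | 2; /* Vs=1, Vp=0 */
	uint32_t batl = (phys & 0xfffe0000) | (wimg << 3) | pp_rw;

	printf("BATU=0x%08x BATL=0x%08x\n", batu, batl);
	return 0;
}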

So only set up page tables for RAM and IMMR on the 8xx, and properly
update them at the end of init.
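
And a minimal, scaled-down user-space sketch of what "populating page
tables" means here: walking a chunk of the linear mapping page by page
and creating a PTE for each page, which is what the new
mmu_mapin_ram_chunk() helper in the diff below does via the kernel's
map_kernel_page(). The addresses, the map_one_page() stand-in and the
tiny ranges are purely illustrative.

#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define PAGE_OFFSET	0xc0000000UL	/* illustrative kernel virtual base */
#define MEMSTART	0x00000000UL	/* illustrative physical RAM start */

/* stand-in for the kernel's map_kernel_page(): create one PTE */
static void map_one_page(unsigned long va, unsigned long pa, const char *prot)
{
	printf("PTE: va 0x%08lx -> pa 0x%08lx [%s]\n", va, pa, prot);
}

/* mirrors mmu_mapin_ram_chunk(offset, top, prot) from the patch below */
static void mapin_chunk(unsigned long offset, unsigned long top, const char *prot)
{
	unsigned long s;

	for (s = offset; s < top; s += PAGE_SIZE)
		map_one_page(PAGE_OFFSET + s, MEMSTART + s, prot);
}

int main(void)
{
	/* e.g. a few executable text pages, then a few data pages */
	mapin_chunk(0, 4 * PAGE_SIZE, "PAGE_KERNEL_X");
	mapin_chunk(4 * PAGE_SIZE, 8 * PAGE_SIZE, "PAGE_KERNEL");
	return 0;
}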

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c8610942203e0d93fcb02ad20c57edd3adb4c9d3.1566554029.git.christophe.leroy@c-s.fr
arch/powerpc/mm/nohash/8xx.c
arch/powerpc/mm/pgtable_32.c

diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 4a06cb3..090af2d 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -103,6 +103,19 @@ static void mmu_patch_addis(s32 *site, long simm)
        patch_instruction_site(site, instr);
 }
 
+void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+{
+       unsigned long s = offset;
+       unsigned long v = PAGE_OFFSET + s;
+       phys_addr_t p = memstart_addr + s;
+
+       for (; s < top; s += PAGE_SIZE) {
+               map_kernel_page(v, p, prot);
+               v += PAGE_SIZE;
+               p += PAGE_SIZE;
+       }
+}
+
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
        unsigned long mapped;
@@ -115,10 +128,20 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
                if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
                        mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
        } else {
+               unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
                mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
                if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
-                       mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
-                                           _ALIGN(__pa(_einittext), 8 << 20));
+                       mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);
+
+               /*
+                * Populate page tables to:
+                * - have them appear in /sys/kernel/debug/kernel_page_tables
+                * - allow the BDI to find the pages when they are not PINNED
+                */
+               mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
+               mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
+               mmu_mapin_immr();
        }
 
        mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
@@ -144,18 +167,41 @@ void mmu_mark_initmem_nx(void)
        if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
                mmu_patch_addis(&patch__itlbmiss_linmem_top8,
                                -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
-       if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+       if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
+               unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+               unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+               unsigned long etext = __pa(_etext);
+
                mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
+
+               /* Update page tables for PTDUMP and BDI */
+               mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
+               if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
+                       mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
+                       mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
+               } else {
+                       mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
+                       mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
+               }
+       }
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mmu_mark_rodata_ro(void)
 {
+       unsigned long sinittext = __pa(_sinittext);
+       unsigned long etext = __pa(_etext);
+
        if (CONFIG_DATA_SHIFT < 23)
                mmu_patch_addis(&patch__dtlbmiss_romem_top8,
                                -__pa(((unsigned long)_sinittext) &
                                      ~(LARGE_PAGE_SIZE_8M - 1)));
        mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
+
+       /* Update page tables for PTDUMP and BDI */
+       mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
+       mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
+       mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
 }
 #endif
 
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 8ec5dfb..73b8416 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -117,10 +117,7 @@ void __init mapin_ram(void)
                if (base >= top)
                        continue;
                base = mmu_mapin_ram(base, top);
-               if (IS_ENABLED(CONFIG_BDI_SWITCH))
-                       __mapin_ram_chunk(reg->base, top);
-               else
-                       __mapin_ram_chunk(base, top);
+               __mapin_ram_chunk(base, top);
        }
 }