OSDN Git Service

riscv: allocate a complete page size for each page table
authorZong Li <zong.li@sifive.com>
Fri, 7 Feb 2020 09:52:44 +0000 (17:52 +0800)
committerPalmer Dabbelt <palmerdabbelt@google.com>
Mon, 24 Feb 2020 21:12:49 +0000 (13:12 -0800)
Each page table should be created by allocating a complete page for
it. Otherwise, the contents of the page table can be corrupted when
the memory allocator later hands out memory from the middle of that
page table for other uses.

Signed-off-by: Zong Li <zong.li@sifive.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
arch/riscv/mm/kasan_init.c

index f0cc860..f8eaf7e 100644 (file)
@@ -46,29 +46,34 @@ asmlinkage void __init kasan_early_init(void)
 
 static void __init populate(void *start, void *end)
 {
-       unsigned long i;
+       unsigned long i, offset;
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
        unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+       unsigned long n_ptes =
+           ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
        unsigned long n_pmds =
-               (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
-                                               n_pages / PTRS_PER_PTE;
+           ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
+
+       pte_t *pte =
+           memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+       pmd_t *pmd =
+           memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
        pgd_t *pgd = pgd_offset_k(vaddr);
-       pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
-       pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 
        for (i = 0; i < n_pages; i++) {
                phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
-
-               set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+               set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
        }
 
-       for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
-               set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+       for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+               set_pmd(&pmd[i],
+                       pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
                                __pgprot(_PAGE_TABLE)));
 
-       for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
-               set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+       for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+               set_pgd(&pgd[i],
+                       pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
                                __pgprot(_PAGE_TABLE)));
 
        flush_tlb_all();