arch/powerpc/mm/kasan/kasan_init_32.c
// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/moduleloader.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

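/*
 * Protection used when the shadow is remapped read-only: hash MMUs
 * (book3s/32) use PAGE_READONLY, the other 32-bit MMUs use PAGE_KERNEL_RO.
 */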
static pgprot_t __init kasan_prot_ro(void)
{
        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return PAGE_READONLY;

        return PAGE_KERNEL_RO;
}

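/*
 * Point every entry of the given page table at the zero-filled early
 * shadow page, using the requested protection.
 */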
static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
        unsigned long va = (unsigned long)kasan_early_shadow_page;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
                __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

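/*
 * For each PMD of [k_start, k_end) still pointing at the early shadow PTE
 * table, allocate a fresh page table from memblock and install it under
 * init_mm.page_table_lock.
 */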
static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
        pmd_t *pmd;
        unsigned long k_cur, k_next;
        pte_t *new = NULL;

        pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

        for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
                k_next = pgd_addr_end(k_cur, k_end);
                if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
                        continue;

                if (!new)
                        new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

                if (!new)
                        return -ENOMEM;
                kasan_populate_pte(new, PAGE_KERNEL);

                smp_wmb(); /* See comment in __pte_alloc */

                spin_lock(&init_mm.page_table_lock);
                /* Has another populated it ? */
                if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
                        pmd_populate_kernel(&init_mm, pmd, new);
                        new = NULL;
                }
                spin_unlock(&init_mm.page_table_lock);
        }
        return 0;
}

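/*
 * Back the shadow of one lowmem region with real memory: allocate the
 * whole shadow range from memblock and map it page by page.
 */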
static int __init kasan_init_region(void *start, size_t size)
{
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
        unsigned long k_cur;
        int ret;
        void *block;

        ret = kasan_init_shadow_page_tables(k_start, k_end);
        if (ret)
                return ret;

        block = memblock_alloc(k_end - k_start, PAGE_SIZE);
        if (!block)
                return -ENOMEM;

        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                void *va = block + k_cur - k_start;
                pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

                __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
        }
        flush_tlb_kernel_range(k_start, k_end);
        return 0;
}

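/*
 * Remap every shadow PTE that still points at the early shadow page with
 * the read-only protection, so that page can no longer be written.
 */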
static void __init kasan_remap_early_shadow_ro(void)
{
        pgprot_t prot = kasan_prot_ro();
        unsigned long k_start = KASAN_SHADOW_START;
        unsigned long k_end = KASAN_SHADOW_END;
        unsigned long k_cur;
        phys_addr_t pa = __pa(kasan_early_shadow_page);

        kasan_populate_pte(kasan_early_shadow_pte, prot);

        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                pte_t *ptep = pte_offset_kernel(pmd, k_cur);

                if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
                        continue;

                __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
        }
        flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

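/*
 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is populated on demand, so
 * clear the early zero-page mappings covering the vmalloc area.
 */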
static void __init kasan_unmap_early_shadow_vmalloc(void)
{
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
        unsigned long k_cur;
        phys_addr_t pa = __pa(kasan_early_shadow_page);

        if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                int ret = kasan_init_shadow_page_tables(k_start, k_end);

                if (ret)
                        panic("kasan: kasan_init_shadow_page_tables() failed");
        }
        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                pte_t *ptep = pte_offset_kernel(pmd, k_cur);

                if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
                        continue;

                __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
        }
        flush_tlb_kernel_range(k_start, k_end);
}

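/*
 * Called once the MMU is up: make sure the shadow page tables exist where
 * needed, then back the shadow of every lowmem memblock region.
 */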
void __init kasan_mmu_init(void)
{
        int ret;
        struct memblock_region *reg;

        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

                if (ret)
                        panic("kasan: kasan_init_shadow_page_tables() failed");
        }

        for_each_memblock(memory, reg) {
                phys_addr_t base = reg->base;
                phys_addr_t top = min(base + reg->size, total_lowmem);

                if (base >= top)
                        continue;

                ret = kasan_init_region(__va(base), top - base);
                if (ret)
                        panic("kasan: kasan_init_region() failed");
        }
}

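/* Make the early shadow read-only, clear it and start reporting errors. */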
void __init kasan_init(void)
{
        kasan_remap_early_shadow_ro();

        clear_page(kasan_early_shadow_page);

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KASAN init done\n");
}

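/* Drop the early vmalloc shadow mappings when CONFIG_KASAN_VMALLOC is in use. */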
void __init kasan_late_init(void)
{
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
                kasan_unmap_early_shadow_vmalloc();
}

#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

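/*
 * Use a temporary, statically allocated hash table during early boot:
 * patch its address into the hash handling code and make Hash point at it.
 */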
static void __init kasan_early_hash_table(void)
{
        unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
                                                            __pa(early_hash);

        modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);

        Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif

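/*
 * Earliest setup: make every PMD of the shadow area point at the zero
 * early shadow page so that instrumented code can run safely.
 */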
void __init kasan_early_init(void)
{
        unsigned long addr = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;
        unsigned long next;
        pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

        BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

        kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

        do {
                next = pgd_addr_end(addr, end);
                pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
        } while (pmd++, addr = next, addr != end);

        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                kasan_early_hash_table();
}