arch/arm64/mm/dump.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>

static const struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
        { KASAN_SHADOW_START,           "Kasan shadow start" },
        { KASAN_SHADOW_END,             "Kasan shadow end" },
#endif
        { MODULES_VADDR,                "Modules start" },
        { MODULES_END,                  "Modules end" },
        { VMALLOC_START,                "vmalloc() area" },
        { VMALLOC_END,                  "vmalloc() end" },
        { FIXADDR_START,                "Fixmap start" },
        { FIXADDR_TOP,                  "Fixmap end" },
        { PCI_IO_START,                 "PCI I/O start" },
        { PCI_IO_END,                   "PCI I/O end" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        { VMEMMAP_START,                "vmemmap start" },
        { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
#endif
        { PAGE_OFFSET,                  "Linear mapping" },
        { -1,                           NULL },
};

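/*
 * Output helpers that become no-ops when no seq_file is supplied, so the
 * same table walk can be reused for the silent W+X sanity check.
 */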
#define pt_dump_seq_printf(m, fmt, args...)     \
({                                              \
        if (m)                                  \
                seq_printf(m, fmt, ##args);     \
})

#define pt_dump_seq_puts(m, fmt)        \
({                                      \
        if (m)                          \
                seq_printf(m, fmt);     \
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        unsigned level;
        u64 current_prot;
        bool check_wx;
        unsigned long wx_pages;
        unsigned long uxn_pages;
};

struct prot_bits {
        u64             mask;
        u64             val;
        const char      *set;
        const char      *clear;
};

static const struct prot_bits pte_bits[] = {
        {
                .mask   = PTE_VALID,
                .val    = PTE_VALID,
                .set    = " ",
                .clear  = "F",
        }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
                .clear  = "   ",
        }, {
                .mask   = PTE_RDONLY,
                .val    = PTE_RDONLY,
                .set    = "ro",
                .clear  = "RW",
        }, {
                .mask   = PTE_PXN,
                .val    = PTE_PXN,
                .set    = "NX",
                .clear  = "x ",
        }, {
                .mask   = PTE_SHARED,
                .val    = PTE_SHARED,
                .set    = "SHD",
                .clear  = "   ",
        }, {
                .mask   = PTE_AF,
                .val    = PTE_AF,
                .set    = "AF",
                .clear  = "  ",
        }, {
                .mask   = PTE_NG,
                .val    = PTE_NG,
                .set    = "NG",
                .clear  = "  ",
        }, {
                .mask   = PTE_CONT,
                .val    = PTE_CONT,
                .set    = "CON",
                .clear  = "   ",
        }, {
                .mask   = PTE_TABLE_BIT,
                .val    = PTE_TABLE_BIT,
                .set    = "   ",
                .clear  = "BLK",
        }, {
                .mask   = PTE_UXN,
                .val    = PTE_UXN,
                .set    = "UXN",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
                .set    = "DEVICE/nGnRnE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRE),
                .set    = "DEVICE/nGnRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_GRE),
                .set    = "DEVICE/GRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_NC),
                .set    = "MEM/NORMAL-NC",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL),
                .set    = "MEM/NORMAL",
        }
};

struct pg_level {
        const struct prot_bits *bits;
        const char *name;
        size_t num;
        u64 mask;
};

static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
                .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
                .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pmd */
                .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pte */
                .name   = "PTE",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        },
};

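/* Print the decoded attribute string for the range that is being flushed. */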
static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
                        size_t num)
{
        unsigned i;

        for (i = 0; i < num; i++, bits++) {
                const char *s;

                if ((st->current_prot & bits->mask) == bits->val)
                        s = bits->set;
                else
                        s = bits->clear;

                if (s)
                        pt_dump_seq_printf(st->seq, " %s", s);
        }
}

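/*
 * During a W+X check, warn once if a kernel mapping is not marked UXN,
 * i.e. is executable at EL0.
 */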
static void note_prot_uxn(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;

        if ((st->current_prot & PTE_UXN) == PTE_UXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}

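/*
 * During a W+X check, warn once if a mapping is both writable and
 * executable at EL1.
 */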
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;
        if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
                return;
        if ((st->current_prot & PTE_PXN) == PTE_PXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

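/*
 * Record one entry.  Consecutive entries with the same level and attributes
 * are accumulated into a single range; when the run is broken, or a marker
 * boundary is crossed, the accumulated range is printed and a new one starts.
 */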
static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
                                u64 val)
{
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;

        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
                st->start_address = addr;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        } else if (prot != st->current_prot || level != st->level ||
                   addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;

                if (st->current_prot) {
                        note_prot_uxn(st, addr);
                        note_prot_wx(st, addr);
                        pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
                                   st->start_address, addr);

                        delta = (addr - st->start_address) >> 10;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
                                   pg_level[st->level].name);
                        if (pg_level[st->level].bits)
                                dump_prot(st, pg_level[st->level].bits,
                                          pg_level[st->level].num);
                        pt_dump_seq_puts(st->seq, "\n");
                }

                if (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }

                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        }

        if (addr >= st->marker[1].start_address) {
                st->marker++;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        }

}

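/*
 * The walk_*() helpers descend the page-table hierarchy: empty and section
 * (block) entries are reported as leaves, table entries are recursed into.
 */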
static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
                     unsigned long end)
{
        unsigned long addr = start;
        pte_t *ptep = pte_offset_kernel(pmdp, start);

        do {
                note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
        } while (ptep++, addr += PAGE_SIZE, addr != end);
}

static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
                     unsigned long end)
{
        unsigned long next, addr = start;
        pmd_t *pmdp = pmd_offset(pudp, start);

        do {
                pmd_t pmd = READ_ONCE(*pmdp);
                next = pmd_addr_end(addr, end);

                if (pmd_none(pmd) || pmd_sect(pmd)) {
                        note_page(st, addr, 3, pmd_val(pmd));
                } else {
                        BUG_ON(pmd_bad(pmd));
                        walk_pte(st, pmdp, addr, next);
                }
        } while (pmdp++, addr = next, addr != end);
}

static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
                     unsigned long end)
{
        unsigned long next, addr = start;
        pud_t *pudp = pud_offset(pgdp, start);

        do {
                pud_t pud = READ_ONCE(*pudp);
                next = pud_addr_end(addr, end);

                if (pud_none(pud) || pud_sect(pud)) {
                        note_page(st, addr, 2, pud_val(pud));
                } else {
                        BUG_ON(pud_bad(pud));
                        walk_pmd(st, pudp, addr, next);
                }
        } while (pudp++, addr = next, addr != end);
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
                     unsigned long start)
{
        unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
        unsigned long next, addr = start;
        pgd_t *pgdp = pgd_offset(mm, start);

        do {
                pgd_t pgd = READ_ONCE(*pgdp);
                next = pgd_addr_end(addr, end);

                if (pgd_none(pgd)) {
                        note_page(st, addr, 1, pgd_val(pgd));
                } else {
                        BUG_ON(pgd_bad(pgd));
                        walk_pud(st, pgdp, addr, next);
                }
        } while (pgdp++, addr = next, addr != end);
}

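/*
 * Dump the page tables described by @info into the given seq_file.  The
 * trailing note_page(..., 0, 0, 0) call flushes the final range.
 */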
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
        struct pg_state st = {
                .seq = m,
                .marker = info->markers,
        };

        walk_pgd(&st, info->mm, info->base_addr);

        note_page(&st, 0, 0, 0);
}

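/* Precompute, for each level, the union of all attribute masks we decode. */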
static void ptdump_initialize(void)
{
        unsigned i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].bits)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].bits[j].mask;
}

static struct ptdump_info kernel_ptdump_info = {
        .mm             = &init_mm,
        .markers        = address_markers,
        .base_addr      = VA_START,
};

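/*
 * Walk the kernel page tables without producing any output and warn if a
 * writable+executable or non-UXN mapping is found.
 */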
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = (struct addr_marker[]) {
                        { 0, NULL},
                        { -1, NULL},
                },
                .check_wx = true,
        };

        walk_pgd(&st, &init_mm, VA_START);
        note_page(&st, 0, 0, 0);
        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
                        st.wx_pages, st.uxn_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

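/* Set up the decode masks and register the "kernel_page_tables" debugfs file. */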
static int ptdump_init(void)
{
        ptdump_initialize();
        return ptdump_debugfs_register(&kernel_ptdump_info,
                                        "kernel_page_tables");
}
device_initcall(ptdump_init);