/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#ifdef CONFIG_MMU

/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
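
/*
 * For example, with 4 KiB pages and 8-byte table entries (RV64), both
 * of the above work out to 4096 / 8 = 512 entries per table.
 */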

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
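
/*
 * For example, a PROT_READ|PROT_WRITE mapping has xwr = 011: MAP_PRIVATE
 * selects __P011 (PAGE_COPY, read-only so the first store faults and is
 * satisfied by copy-on-write), while MAP_SHARED selects __S011
 * (PAGE_SHARED, directly writable).
 */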

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}

/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))
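
/*
 * Usage sketch (not part of this file): resolving the PTE behind a user
 * address with the helpers above, eliding locking and error checks and
 * assuming a present leaf mapping; the p4d/pud levels are folded here:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd, addr), addr), addr);
 *	pte_t *ptep = pte_offset_map(pmd, addr);
 *	unsigned long pfn = pte_pfn(*ptep);
 *	pte_unmap(ptep);
 */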

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
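
/*
 * Example (sketch of how generic mm code composes the accessors above):
 * a writable anonymous fault builds its PTE roughly as
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	pte = pte_mkwrite(pte_mkdirty(pte));
 */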

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
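
/*
 * Worked example (hypothetical values): __swp_entry(1, 0x1234) is
 * (1 << 2) | (0x1234 << 7) = 0x91a04.  Bits 0 and 1 remain zero, so the
 * PTE is neither present nor PROT_NONE, and __swp_type()/__swp_offset()
 * recover 1 and 0x1234.
 */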

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
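
/*
 * For example, under Sv39 (CONFIG_VA_BITS == 39) with 4 KiB pages and
 * assuming STRUCT_PAGE_MAX_SHIFT == 6 (a 64-byte struct page), this is
 * 39 - 12 - 1 + 6 = 32, i.e. a 4 GiB vmemmap region.
 */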

#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
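
/*
 * Taken together, the definitions above stack the regions downward from
 * PAGE_OFFSET: vmalloc, then vmemmap, then the PCI I/O window, then the
 * fixmap, each ending where the previous region starts.
 */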

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#endif
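
/*
 * For RV64 with Sv39 this is 1 GiB (PGDIR_SIZE) * 512 (PTRS_PER_PGD) / 2
 * = 2^38 = 0x4000000000: user space owns the lower half of the PGD.
 */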

#else /* CONFIG_MMU */

#define PAGE_KERNEL	__pgprot(0)
#define swapper_pg_dir	NULL
#define VMALLOC_START	0

#define TASK_SIZE	0xffffffffUL

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr)	(1) /* FIXME */

extern void *dtb_early_va;
void setup_bootmem(void);
void paging_init(void);

#define FIRST_USER_ADDRESS	0

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */