#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>

#ifdef CONFIG_KAISER
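/*
 * KAISER keeps a second, "shadow" copy of the kernel page tables that is
 * active while running in user mode and maps only the few kernel ranges
 * the hardware needs (entry/exit code, the IDT, a handful of per-CPU
 * items). The per-CPU slot below lives in the user-mapped per-CPU section
 * so the entry code can stash a scratch register there before switching
 * back to the full kernel page tables.
 */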
__visible DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
/**
 * Get the physical address backing an address in the kernel mapping.
 * @param address The virtual address
 * @return the physical address
 */
static inline unsigned long get_pa_from_mapping(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd) || pgd_large(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));

	if (pud_large(*pud)) {
		return (pud_pfn(*pud) << PAGE_SHIFT) | (address & ~PUD_PAGE_MASK);
	}

	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_none(*pmd));

	if (pmd_large(*pmd)) {
		return (pmd_pfn(*pmd) << PAGE_SHIFT) | (address & ~PMD_PAGE_MASK);
	}

	pte = pte_offset_kernel(pmd, address);
	BUG_ON(pte_none(*pte));

	return (pte_pfn(*pte) << PAGE_SHIFT) | (address & ~PAGE_MASK);
}
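
/**
 * Mirror a kernel mapping into the shadow page tables.
 * @param start_addr Start of the virtual range to copy
 * @param size Size of the range in bytes
 * @param flags Page flags to use for the shadow PTEs
 *
 * Walks the range page by page, looks up the physical address in the
 * regular kernel page tables and installs a matching entry in the shadow
 * tables, allocating intermediate page tables on demand. Callers must
 * hold shadow_table_lock.
 */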
void _kaiser_copy(unsigned long start_addr, unsigned long size,
		  unsigned long flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	unsigned long end_addr = start_addr + size;
	unsigned long target_address;

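	/*
	 * Round the start down and the end up to page boundaries so that a
	 * range that only partially covers a page is still mapped in full.
	 */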
	for (address = PAGE_ALIGN(start_addr - (PAGE_SIZE - 1));
	     address < PAGE_ALIGN(end_addr); address += PAGE_SIZE) {
		target_address = get_pa_from_mapping(address);

		pgd = native_get_shadow_pgd(pgd_offset_k(address));

		BUG_ON(pgd_none(*pgd) && "All shadow pgds should be mapped at this time\n");
		BUG_ON(pgd_large(*pgd));

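		/*
		 * Allocate any missing intermediate page-table levels on
		 * demand; _kaiser_init() only populates the top level.
		 */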
		pud = pud_offset(pgd, address);
		if (pud_none(*pud)) {
			set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd_alloc_one(0, address))));
		}
		BUG_ON(pud_large(*pud));

		pmd = pmd_offset(pud, address);
		if (pmd_none(*pmd)) {
			set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte_alloc_one_kernel(0, address))));
		}
		BUG_ON(pmd_large(*pmd));

		pte = pte_offset_kernel(pmd, address);
		if (pte_none(*pte)) {
			set_pte(pte, __pte(flags | target_address));
		} else {
			/* already mapped: the entry must point at the same physical frame */
			BUG_ON((pte_pfn(*pte) << PAGE_SHIFT) != target_address);
		}
	}
}

// first, allocate a pud page for every pgd entry in the kernel half of the shadow mapping
static inline void __init _kaiser_init(void)
{
	pgd_t *pgd;
	int i;

	pgd = native_get_shadow_pgd(pgd_offset_k((unsigned long)0));
	for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
		set_pgd(pgd + i, __pgd(_PAGE_TABLE | __pa(pud_alloc_one(0, 0))));
	}
}

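/*
 * Bounds of the user-mapped per-CPU region; assumed to be provided by the
 * architecture linker script around the input section that
 * DEFINE_PER_CPU_USER_MAPPED places its variables in.
 */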
extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
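/* serializes all updates to the shadow page tables */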
spinlock_t shadow_table_lock;
void __init kaiser_init(void)
{
	int cpu;

	spin_lock_init(&shadow_table_lock);

	spin_lock(&shadow_table_lock);

	/* populate the top level of the shadow tables before copying entries */
	_kaiser_init();

	for_each_possible_cpu(cpu) {
		// map the per cpu user variables
		_kaiser_copy(
				(unsigned long) (__per_cpu_user_mapped_start + per_cpu_offset(cpu)),
				(unsigned long) __per_cpu_user_mapped_end - (unsigned long) __per_cpu_user_mapped_start,
				__PAGE_KERNEL);
	}

	// map the entry/exit text section, which is responsible for switching between user and kernel mode
	_kaiser_copy(
			(unsigned long) __entry_text_start,
			(unsigned long) __entry_text_end - (unsigned long) __entry_text_start,
			__PAGE_KERNEL_RX);

	// map the fixed-map address of the idt_table
	_kaiser_copy(
			(unsigned long) idt_descr.address,
			sizeof(gate_desc) * NR_VECTORS,
			__PAGE_KERNEL_RO);

	spin_unlock(&shadow_table_lock);
}

// add a mapping to the shadow page tables and keep the two mappings synchronized
void kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
{
	spin_lock(&shadow_table_lock);
	_kaiser_copy(addr, size, flags);
	spin_unlock(&shadow_table_lock);
}

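/*
 * Example (hypothetical caller, not part of this file): code that allocates
 * a kernel stack the entry path must be able to reach could mirror it into
 * the shadow tables and tear it down again on free:
 *
 *	kaiser_add_mapping((unsigned long)stack, THREAD_SIZE, __PAGE_KERNEL);
 *	...
 *	kaiser_remove_mapping((unsigned long)stack, THREAD_SIZE);
 */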
extern void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end);
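// remove a range from the shadow mapping, walking every pgd entry the range spans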
void kaiser_remove_mapping(unsigned long start, unsigned long size)
{
	pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(start));

	spin_lock(&shadow_table_lock);
	do {
		unmap_pud_range(pgd, start, start + size);
	} while (pgd++ != native_get_shadow_pgd(pgd_offset_k(start + size)));
	spin_unlock(&shadow_table_lock);
}

#endif /* CONFIG_KAISER */