[android-x86/kernel.git] arch/x86/mm/kaiser.c @ cf1bb922d467d13ba0322f9313146da6cbb286a3
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#ifdef CONFIG_KAISER

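/*
 * Per-cpu backup slot. DEFINE_PER_CPU_USER_MAPPED places it in the
 * user-mapped per-cpu section, which kaiser_init() below copies into the
 * shadow page tables.
 */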
__visible DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);

/**
 * Get the physical address from an address in the kernel mapping.
 * @param address The virtual address
 * @return The physical address
 */
static inline unsigned long get_pa_from_mapping(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd) || pgd_large(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));

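	/* Large pages map the frame directly at the pud or pmd level. */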
	if (pud_large(*pud)) {
		return (pud_pfn(*pud) << PAGE_SHIFT) | (address & ~PUD_PAGE_MASK);
	}

	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_none(*pmd));

	if (pmd_large(*pmd)) {
		return (pmd_pfn(*pmd) << PAGE_SHIFT) | (address & ~PMD_PAGE_MASK);
	}

	pte = pte_offset_kernel(pmd, address);
	BUG_ON(pte_none(*pte));

	return (pte_pfn(*pte) << PAGE_SHIFT) | (address & ~PAGE_MASK);
}

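/*
 * Mirror the kernel mapping of [start_addr, start_addr + size) into the
 * shadow page tables, one page at a time, allocating intermediate tables
 * as needed. Callers in this file take shadow_table_lock around it.
 */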
void _kaiser_copy(unsigned long start_addr, unsigned long size,
		  unsigned long flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	unsigned long end_addr = start_addr + size;
	unsigned long target_address;

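	/* Round start_addr down and end_addr up to page boundaries. */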
	for (address = PAGE_ALIGN(start_addr - (PAGE_SIZE - 1));
			address < PAGE_ALIGN(end_addr); address += PAGE_SIZE) {
		target_address = get_pa_from_mapping(address);

		pgd = native_get_shadow_pgd(pgd_offset_k(address));

		BUG_ON(pgd_none(*pgd) && "All shadow pgds should be mapped at this time\n");
		BUG_ON(pgd_large(*pgd));

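		/* Allocate any missing intermediate tables in the shadow hierarchy. */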
		pud = pud_offset(pgd, address);
		if (pud_none(*pud)) {
			set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd_alloc_one(0, address))));
		}
		BUG_ON(pud_large(*pud));

		pmd = pmd_offset(pud, address);
		if (pmd_none(*pmd)) {
			set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte_alloc_one_kernel(0, address))));
		}
		BUG_ON(pmd_large(*pmd));

		pte = pte_offset_kernel(pmd, address);
		if (pte_none(*pte)) {
			set_pte(pte, __pte(flags | target_address));
		} else {
			/* Already mapped: it must point at the same physical page. */
			BUG_ON((pte_pfn(*pte) << PAGE_SHIFT) != target_address);
		}
	}
}

// at first, allocate a pud for every pgd entry in the kernel part of the shadow mapping
static inline void __init _kaiser_init(void)
{
	pgd_t *pgd;
	int i = 0;

	pgd = native_get_shadow_pgd(pgd_offset_k((unsigned long)0));
	for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
		set_pgd(pgd + i, __pgd(_PAGE_TABLE | __pa(pud_alloc_one(0, 0))));
	}
}


extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
spinlock_t shadow_table_lock;
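/*
 * Build the initial shadow mapping: the kernel half of the shadow pgds, the
 * per-cpu user-mapped data, the entry/exit text and the IDT.
 */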
void __init kaiser_init(void)
{
	int cpu;
	spin_lock_init(&shadow_table_lock);

	spin_lock(&shadow_table_lock);

	_kaiser_init();

	for_each_possible_cpu(cpu) {
		// map the per cpu user variables
		_kaiser_copy(
				(unsigned long) (__per_cpu_user_mapped_start + per_cpu_offset(cpu)),
				(unsigned long) __per_cpu_user_mapped_end - (unsigned long) __per_cpu_user_mapped_start,
				__PAGE_KERNEL);
	}

	// map the entry/exit text section, which is responsible for switching between user and kernel mode
	_kaiser_copy(
			(unsigned long) __entry_text_start,
			(unsigned long) __entry_text_end - (unsigned long) __entry_text_start,
			__PAGE_KERNEL_RX);

	// the fixed map address of the idt_table
	_kaiser_copy(
			(unsigned long) idt_descr.address,
			sizeof(gate_desc) * NR_VECTORS,
			__PAGE_KERNEL_RO);

	spin_unlock(&shadow_table_lock);
}

// add a mapping to the shadow mapping, and synchronize the mappings
void kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
{
	spin_lock(&shadow_table_lock);
	_kaiser_copy(addr, size, flags);
	spin_unlock(&shadow_table_lock);
}

extern void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end);
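/*
 * Remove a range from the shadow mapping, walking every shadow pgd entry
 * the range spans.
 */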
void kaiser_remove_mapping(unsigned long start, unsigned long size)
{
	pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(start));
	spin_lock(&shadow_table_lock);
	do {
		unmap_pud_range(pgd, start, start + size);
	} while (pgd++ != native_get_shadow_pgd(pgd_offset_k(start + size)));
	spin_unlock(&shadow_table_lock);
}
#endif /* CONFIG_KAISER */