arch/arm64/kernel/vdso.c (sagit-ice-cold/kernel_xiaomi_msm8998.git)
/*
 * Additional userspace pages setup for AArch64 and AArch32.
 *  - AArch64: vDSO pages setup, vDSO data page update.
 *  - AArch32: sigreturn and kuser helpers pages setup.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>

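/*
 * A vDSO instance (the AArch64 one, and the AArch32 one when
 * CONFIG_VDSO32 is set) is described by one data mapping ("[vvar]",
 * a single page) and one code mapping ("[vdso]", num_code_pages
 * pages), so both instances can share the init and setup code below.
 */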
struct vdso_mappings {
        unsigned long num_code_pages;
        struct vm_special_mapping data_mapping;
        struct vm_special_mapping code_mapping;
};

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
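/*
 * The union pads the data out to exactly one page, so the vdso_data
 * the kernel writes to and the page userspace reads through its
 * [vvar] mapping are one and the same.  A build-time check such as
 * BUILD_BUG_ON(sizeof(struct vdso_data) > PAGE_SIZE) would make the
 * size invariant explicit (not present here; the union implies it).
 */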

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#if !defined(CONFIG_VDSO32) || defined(CONFIG_KUSER_HELPERS)
static struct page *vectors_page[] __ro_after_init;
static const struct vm_special_mapping compat_vdso_spec[] = {
        {
                /* Must be named [sigpage] for compatibility with arm. */
                .name   = "[sigpage]",
                .pages  = &vectors_page[0],
        },
#ifdef CONFIG_KUSER_HELPERS
        {
                .name   = "[kuserhelpers]",
                .pages  = &vectors_page[1],
        },
#endif
};
static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
#endif

static int __init alloc_vectors_page(void)
{
#ifdef CONFIG_KUSER_HELPERS
        extern char __kuser_helper_start[], __kuser_helper_end[];
        size_t kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long kuser_vpage;
#endif

#ifndef CONFIG_VDSO32
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        size_t sigret_sz =
                __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigret_vpage;

        sigret_vpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigret_vpage)
                return -ENOMEM;
#endif

#ifdef CONFIG_KUSER_HELPERS
        kuser_vpage = get_zeroed_page(GFP_ATOMIC);
        if (!kuser_vpage) {
#ifndef CONFIG_VDSO32
                free_page(sigret_vpage);
#endif
                return -ENOMEM;
        }
#endif

#ifndef CONFIG_VDSO32
        /* sigreturn code */
        memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
        flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
        vectors_page[0] = virt_to_page(sigret_vpage);
#endif

#ifdef CONFIG_KUSER_HELPERS
        /* kuser helpers */
        memcpy((void *)kuser_vpage + 0x1000 - kuser_sz, __kuser_helper_start,
                kuser_sz);
        flush_icache_range(kuser_vpage, kuser_vpage + PAGE_SIZE);
        vectors_page[1] = virt_to_page(kuser_vpage);
#endif

        return 0;
}
arch_initcall(alloc_vectors_page);
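/*
 * Note the destination of the kuser helpers memcpy() above: the AArch32
 * kuser ABI places the helpers at fixed addresses counting down from the
 * top of the vectors page (e.g. __kuser_helper_version at 0xffff0ffc when
 * the page is mapped at 0xffff0000), so the code is copied to the end of
 * the page, at offset 0x1000 - kuser_sz, rather than to its start.
 */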

#ifndef CONFIG_VDSO32
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        void *ret;

        down_write(&mm->mmap_sem);
        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &compat_vdso_spec[0]);
        if (IS_ERR(ret))
                goto out;

        current->mm->context.vdso = (void *)addr;

#ifdef CONFIG_KUSER_HELPERS
        /* Map the kuser helpers at the ABI-defined high address. */
        ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
                                       PAGE_SIZE,
                                       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
                                       &compat_vdso_spec[1]);
#endif
out:
        up_write(&mm->mmap_sem);

        return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_VDSO32 */
#endif /* CONFIG_COMPAT */
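/*
 * The aarch32_setup_vectors_page() above is the !CONFIG_VDSO32 flavour:
 * it maps only a standalone sigreturn page plus, optionally, the kuser
 * helpers.  A second flavour further down, built when CONFIG_VDSO32 is
 * set, maps a full 32-bit vDSO via vdso_setup() instead.
 */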

static int __init vdso_mappings_init(const char *name,
                                     const char *code_start,
                                     const char *code_end,
                                     struct vdso_mappings *mappings)
{
        unsigned long i, vdso_pages;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(code_start, "\177ELF", 4)) {
                pr_err("%s is not a valid ELF object!\n", name);
                return -EINVAL;
        }

        vdso_pages = (code_end - code_start) >> PAGE_SHIFT;
        pr_info("%s: %ld pages (%ld code @ %p, %ld data @ %p)\n",
                name, vdso_pages + 1, vdso_pages, code_start, 1L,
                vdso_data);

        /*
         * Allocate space for storing pointers to the vDSO code pages + the
         * data page. The pointers must have the same lifetime as the mappings,
         * which are static, so there is no need to keep track of the pointer
         * array to free it.
         */
        vdso_pagelist = kmalloc_array(vdso_pages + 1, sizeof(struct page *),
                                      GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO data page. */
        vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(code_start);

        for (i = 0; i < vdso_pages; i++)
                vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

        /* Populate the special mapping structures */
        mappings->data_mapping = (struct vm_special_mapping) {
                .name   = "[vvar]",
                .pages  = &vdso_pagelist[0],
        };

        mappings->code_mapping = (struct vm_special_mapping) {
                .name   = "[vdso]",
                .pages  = &vdso_pagelist[1],
        };

        mappings->num_code_pages = vdso_pages;
        return 0;
}
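/*
 * After vdso_mappings_init() the two special mappings alias into a
 * single page array:
 *
 *      vdso_pagelist[0]                -> vDSO data page  ("[vvar]")
 *      vdso_pagelist[1..vdso_pages]    -> vDSO code pages ("[vdso]")
 *
 * Both instances reference the same vdso_data page, so one update in
 * update_vsyscall() is visible to 64-bit and 32-bit tasks alike.
 */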

#ifdef CONFIG_COMPAT
#ifdef CONFIG_VDSO32

static struct vdso_mappings vdso32_mappings __ro_after_init;

static int __init vdso32_init(void)
{
        extern char vdso32_start[], vdso32_end[];

        return vdso_mappings_init("vdso32", vdso32_start, vdso32_end,
                                  &vdso32_mappings);
}
arch_initcall(vdso32_init);

#endif /* CONFIG_VDSO32 */
#endif /* CONFIG_COMPAT */

static struct vdso_mappings vdso_mappings __ro_after_init;

static int __init vdso_init(void)
{
        extern char vdso_start[], vdso_end[];

        return vdso_mappings_init("vdso", vdso_start, vdso_end,
                                  &vdso_mappings);
}
arch_initcall(vdso_init);

static int vdso_setup(struct mm_struct *mm,
                      const struct vdso_mappings *mappings)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        vdso_text_len = mappings->num_code_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base))
                return vdso_base;

        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                       VM_READ|VM_MAYREAD,
                                       &mappings->data_mapping);
        if (IS_ERR(ret))
                return PTR_ERR(ret);

        vdso_base += PAGE_SIZE;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &mappings->code_mapping);
        if (!IS_ERR(ret))
                mm->context.vdso = (void *)vdso_base;

        return PTR_ERR_OR_ZERO(ret);
}
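/*
 * The resulting layout in a task's address space is:
 *
 *      vdso_base - PAGE_SIZE   [vvar]  r--  (data page)
 *      vdso_base ..            [vdso]  r-x  (code pages)
 *
 * mm->context.vdso records the start of the code mapping; the ELF
 * loader exposes it to userspace as AT_SYSINFO_EHDR, which is how the
 * dynamic linker locates the vDSO's ELF header without a syscall.
 */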

#ifdef CONFIG_COMPAT
#ifdef CONFIG_VDSO32
int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        void *ret;

        down_write(&mm->mmap_sem);

        ret = ERR_PTR(vdso_setup(mm, &vdso32_mappings));
#ifdef CONFIG_KUSER_HELPERS
        if (!IS_ERR(ret))
                /* Map the kuser helpers at the ABI-defined high address. */
                ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
                                               PAGE_SIZE,
                                               VM_READ|VM_EXEC|
                                               VM_MAYREAD|VM_MAYEXEC,
                                               &compat_vdso_spec[1]);
#endif

        up_write(&mm->mmap_sem);

        return PTR_ERR_OR_ZERO(ret);
}
#endif /* CONFIG_VDSO32 */
#endif /* CONFIG_COMPAT */

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_write(&mm->mmap_sem);
        ret = vdso_setup(mm, &vdso_mappings);
        up_write(&mm->mmap_sem);
        return ret;
}
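/*
 * arch_setup_additional_pages() (and aarch32_setup_vectors_page() for
 * compat tasks) is invoked by the ELF loader during exec, against the
 * fresh mm, so every user process carries these mappings from its
 * first instruction onwards.
 */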

/*
 * Update the vDSO data page to keep in sync with kernel timekeeping.
 */
void update_vsyscall(struct timekeeper *tk)
{
        u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");

        ++vdso_data->tb_seq_count;
        smp_wmb();

        vdso_data->use_syscall                  = use_syscall;
        vdso_data->xtime_coarse_sec             = tk->xtime_sec;
        vdso_data->xtime_coarse_nsec            = tk->tkr_mono.xtime_nsec >>
                                                        tk->tkr_mono.shift;
        vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;

        if (!use_syscall) {
                struct timespec btm = ktime_to_timespec(tk->offs_boot);

                /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
                vdso_data->raw_time_sec         = tk->raw_sec;
                vdso_data->raw_time_nsec        = tk->tkr_raw.xtime_nsec;
                vdso_data->xtime_clock_sec      = tk->xtime_sec;
                vdso_data->xtime_clock_snsec    = tk->tkr_mono.xtime_nsec;
                vdso_data->cs_mono_mult         = tk->tkr_mono.mult;
                vdso_data->cs_raw_mult          = tk->tkr_raw.mult;
                /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift             = tk->tkr_mono.shift;
                vdso_data->btm_sec              = btm.tv_sec;
                vdso_data->btm_nsec             = btm.tv_nsec;
        }

        smp_wmb();
        ++vdso_data->tb_seq_count;
}
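/*
 * tb_seq_count acts as a seqcount: it is odd while an update is in
 * flight and differs before/after every update.  A vDSO-side reader
 * loops along these lines (an illustrative sketch, not code from this
 * file; the real readers are in the vDSO's gettimeofday assembly):
 *
 *      do {
 *              seq = READ_ONCE(vdso_data->tb_seq_count);
 *              smp_rmb();
 *              ... copy the snapshot fields needed ...
 *              smp_rmb();
 *      } while ((seq & 1) || seq != READ_ONCE(vdso_data->tb_seq_count));
 *
 * The two smp_wmb() calls above pair with the reader's barriers so a
 * torn snapshot is always detected and retried.
 */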

void update_vsyscall_tz(void)
{
        vdso_data->tz_minuteswest       = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime           = sys_tz.tz_dsttime;
}