/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that, when everything
 * goes well, we don't even have to jump over them.  Further, they do
 * not intrude on our cache or TLB entries.
 */

struct exception_table_entry
{
        int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE

extern int fixup_exception(struct pt_regs *regs);

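/*
 * Illustrative sketch only: the generic extable code (lib/extable.c)
 * resolves a relative entry by adding each offset to its own address,
 * along the lines of:
 *
 *	static inline unsigned long
 *	ex_to_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 *
 * which is what keeps the table position-independent.
 */
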
#define KERNEL_DS       (-1UL)
#define get_ds()        (KERNEL_DS)

#define USER_DS         TASK_SIZE_64
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;

        /*
         * Enable/disable UAO so that copy_to_user() etc. can access
         * kernel memory with the unprivileged instructions.
         */
        if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
        else
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
                                CONFIG_ARM64_UAO));
}

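/*
 * Sketch of the classic get_fs()/set_fs() pattern from this era of the
 * kernel (the caller, not this header, is responsible for restoring the
 * old limit):
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 *
 * With KERNEL_DS in force, __range_ok() below accepts kernel addresses,
 * so the read can target an ordinary kernel buffer such as 'kbuf' here.
 */
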
#define segment_eq(a, b)        ((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)                                          \
({                                                                      \
        unsigned long __addr = (unsigned long __force)(addr);           \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"         \
                : "=&r" (flag), "=&r" (roksum)                          \
                : "1" (__addr), "Ir" (size),                            \
                  "r" (current_thread_info()->addr_limit)               \
                : "cc");                                                \
        flag;                                                           \
})

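/*
 * Plain-C sketch of the check above, assuming __builtin_add_overflow
 * for the carry that the ADDS/CCMP pair tracks in hardware:
 *
 *	unsigned long sum;
 *	int carry = __builtin_add_overflow(__addr, (unsigned long)(size),
 *					   &sum);
 *	flag = !carry && sum <= current_thread_info()->addr_limit;
 */
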
/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)             sign_extend64(addr, 55)

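/*
 * Example: a TTBR0 user address has bit 55 clear, so sign-extending from
 * bit 55 clears a tag held in bits 63:56:
 *
 *	untagged_addr(0x5a00000000412000UL) == 0x0000000000412000UL
 */
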
#define access_ok(type, addr, size)     __range_ok(addr, size)
#define user_addr_max                   get_fs

#define _ASM_EXTABLE(from, to)                                          \
        "       .pushsection    __ex_table, \"a\"\n"                    \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
        unsigned long flags, ttbr;

        local_irq_save(flags);
        ttbr = read_sysreg(ttbr1_el1);
        ttbr &= ~TTBR_ASID_MASK;
        /* reserved_ttbr0 placed at the end of swapper_pg_dir */
        write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
        isb();
        /* Set reserved ASID */
        write_sysreg(ttbr, ttbr1_el1);
        isb();
        local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
        unsigned long flags, ttbr0, ttbr1;

        /*
         * Disable interrupts to avoid preemption between reading the 'ttbr0'
         * variable and the MSR. A context switch could trigger an ASID
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
        ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

        /* Restore active ASID */
        ttbr1 = read_sysreg(ttbr1_el1);
        ttbr1 &= ~TTBR_ASID_MASK;               /* safety measure */
        ttbr1 |= ttbr0 & TTBR_ASID_MASK;
        write_sysreg(ttbr1, ttbr1_el1);
        isb();

        /* Restore user page table */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
        local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_disable();
        return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
        if (!system_uses_ttbr0_pan())
                return false;
        __uaccess_ttbr0_enable();
        return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
        return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
        return false;
}
#endif

#define __uaccess_disable(alt)                                          \
do {                                                                    \
        if (!uaccess_ttbr0_disable())                                   \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,          \
                                CONFIG_ARM64_PAN));                     \
} while (0)

#define __uaccess_enable(alt)                                           \
do {                                                                    \
        if (!uaccess_ttbr0_enable())                                    \
                asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,          \
                                CONFIG_ARM64_PAN));                     \
} while (0)

static inline void uaccess_disable(void)
{
        __uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
        __uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
        __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
        __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

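/*
 * Usage sketch (this is exactly the bracketing the __get_user/__put_user
 * macros below generate): keep the window with user access enabled as
 * small as possible:
 *
 *	uaccess_enable_not_uao();
 *	...one user load/store with an exception-table fixup...
 *	uaccess_disable_not_uao();
 */
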
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)    \
        asm volatile(                                                   \
        "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
                        alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup, \"ax\"\n"                              \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       mov     %1, #0\n"                                       \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
        _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err), "=&r" (x)                                         \
        : "r" (addr), "i" (-EFAULT))

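/*
 * For example (illustrative register choice), the 4-byte case below
 * assembles to "ldr w1, [x2]" on CPUs without UAO and is patched to the
 * unprivileged "ldtr w1, [x2]" when the ARM64_HAS_UAO capability is
 * detected, so the access is checked against EL0 permissions even while
 * PSTATE.PAN remains set.
 */
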
#define __get_user_err(x, ptr, err)                                     \
do {                                                                    \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        uaccess_enable_not_uao();                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        uaccess_disable_not_uao();                                      \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
} while (0)

#define __get_user(x, ptr)                                              \
({                                                                      \
        int __gu_err = 0;                                               \
        __get_user_err((x), (ptr), __gu_err);                           \
        __gu_err;                                                       \
})

#define __get_user_error(x, ptr, err)                                   \
({                                                                      \
        __get_user_err((x), (ptr), (err));                              \
        (void)0;                                                        \
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        access_ok(VERIFY_READ, __p, sizeof(*__p)) ?                     \
                __get_user((x), __p) :                                  \
                ((x) = 0, -EFAULT);                                     \
})

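/*
 * Example use, as a sketch (hypothetical syscall):
 *
 *	SYSCALL_DEFINE1(example_get, int __user *, uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return val;
 *	}
 *
 * On failure get_user() also zeroes 'val', matching the fixup path in
 * __get_user_asm() above.
 */
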
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)    \
        asm volatile(                                                   \
        "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
                        alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup,\"ax\"\n"                               \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
        _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err)                                                    \
        : "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)                                     \
do {                                                                    \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        uaccess_enable_not_uao();                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm("str", "sttr", "%w", __pu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm("str", "sttr", "%", __pu_val, (ptr),     \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        uaccess_disable_not_uao();                                      \
} while (0)

#define __put_user(x, ptr)                                              \
({                                                                      \
        int __pu_err = 0;                                               \
        __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
})

#define __put_user_error(x, ptr, err)                                   \
({                                                                      \
        __put_user_err((x), (ptr), (err));                              \
        (void)0;                                                        \
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)                                                \
({                                                                      \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?                    \
                __put_user((x), __p) :                                  \
                -EFAULT;                                                \
})

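/*
 * Example, as a sketch: put_user() returns 0 on success and -EFAULT on
 * a faulting or out-of-range address:
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */
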
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        kasan_check_write(to, n);

        if (access_ok(VERIFY_READ, from, n)) {
                check_object_size(to, n, false);
                res = __arch_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}

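/*
 * Note the zero-fill above: if the copy faults part-way through, the
 * remaining 'res' bytes of the kernel buffer are cleared, so callers
 * that ignore the return value never see uninitialized data. Sketch:
 *
 *	struct args a;
 *
 *	if (copy_from_user(&a, uptr, sizeof(a)))
 *		return -EFAULT;
 */
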
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);

        if (access_ok(VERIFY_WRITE, to, n)) {
                check_object_size(from, n, true);
                n = __arch_copy_to_user(to, from, n);
        }
        return n;
}

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
                n = __copy_in_user(to, from, n);
        return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user(to, n);
        return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#else   /* __ASSEMBLY__ */

#include <asm/assembler.h>

/*
 * User access enabling/disabling macros.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        .macro  __uaccess_ttbr0_disable, tmp1
        mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
        bic     \tmp1, \tmp1, #TTBR_ASID_MASK
        add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
        msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
        isb
        sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
        msr     ttbr1_el1, \tmp1                // set reserved ASID
        isb
        .endm

        .macro  __uaccess_ttbr0_enable, tmp1, tmp2
        get_thread_info \tmp1
        ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
        mrs     \tmp2, ttbr1_el1
        extr    \tmp2, \tmp2, \tmp1, #48
        ror     \tmp2, \tmp2, #16
        msr     ttbr1_el1, \tmp2                // set the active ASID
        isb
        msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
        isb
        .endm

        .macro  uaccess_ttbr0_disable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
        save_and_disable_irq \tmp2              // avoid preemption
        __uaccess_ttbr0_disable \tmp1
        restore_irq \tmp2
alternative_else_nop_endif
        .endm

        .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
alternative_if_not ARM64_HAS_PAN
        save_and_disable_irq \tmp3              // avoid preemption
        __uaccess_ttbr0_enable \tmp1, \tmp2
        restore_irq \tmp3
alternative_else_nop_endif
        .endm
#else
        .macro  uaccess_ttbr0_disable, tmp1, tmp2
        .endm

        .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
        .endm
#endif

/*
 * These macros are no-ops when UAO is present.
 */
        .macro  uaccess_disable_not_uao, tmp1, tmp2
        uaccess_ttbr0_disable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(1)
alternative_else_nop_endif
        .endm

        .macro  uaccess_enable_not_uao, tmp1, tmp2, tmp3
        uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(0)
alternative_else_nop_endif
        .endm

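/*
 * Usage sketch from assembly, with illustrative scratch registers (the
 * arch user copy routines follow this pattern, with the macro arities
 * defined above):
 *
 *	uaccess_enable_not_uao x3, x4, x5
 *	...unprivileged loads/stores with exception-table fixups...
 *	uaccess_disable_not_uao x3, x4
 */
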
#endif  /* __ASSEMBLY__ */

#endif /* __ASM_UACCESS_H */