Linux 4.9.95 (android-x86/kernel.git): arch/arm64/include/asm/uaccess.h

/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
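
/*
 * Editor's note: an illustrative (not authoritative) sketch of how
 * fixup_exception() resolves these relative offsets; the real arm64
 * implementation lives in arch/arm64/mm/extable.c:
 *
 *        fixup = search_exception_tables(instruction_pointer(regs));
 *        if (fixup)
 *                regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
 *
 * Each 32-bit field stores the target address minus the address of the
 * field itself, keeping the table small and free of relocations.
 */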

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;

        /*
         * Prevent a mispredicted conditional call to set_fs from forwarding
         * the wrong address limit to access_ok under speculation.
         */
        dsb(nsh);
        isb();

        /*
         * Enable/disable UAO so that copy_to_user() etc can access
         * kernel memory with the unprivileged instructions.
         */
        if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
        else
                asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
                                CONFIG_ARM64_UAO));
}
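
/*
 * Editor's usage sketch (hypothetical caller, shown for illustration):
 * the classic pre-4.10-era pattern for letting uaccess helpers operate
 * on a kernel buffer is to widen the limit and then restore it:
 *
 *        mm_segment_t old_fs = get_fs();
 *
 *        set_fs(KERNEL_DS);
 *        ret = some_uaccess_helper(kbuf, len);  // hypothetical helper
 *        set_fs(old_fs);
 */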

#define segment_eq(a, b)        ((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
{
        unsigned long limit = current_thread_info()->addr_limit;

        __chk_user_ptr(addr);
        asm volatile(
        // A + B <= C + 1 for all A,B,C, in four easy steps:
        // 1: X = A + B; X' = X % 2^64
        "       adds    %0, %0, %2\n"
        // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
        "       csel    %1, xzr, %1, hi\n"
        // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
        //    to compensate for the carry flag being set in step 4. For
        //    X > 2^64, X' merely has to remain nonzero, which it does.
        "       csinv   %0, %0, xzr, cc\n"
        // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
        //    comes from the carry in being clear. Otherwise, we are
        //    testing X' - C == 0, subject to the previous adjustments.
        "       sbcs    xzr, %0, %1\n"
        "       cset    %0, ls\n"
        : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");

        return addr;
}
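
/*
 * Editor's worked example for the sequence above: with addr = limit - 7
 * and size = 8, addr + size == limit + 1, i.e. the last byte lands
 * exactly on the inclusive limit. "adds" leaves X' = limit + 1 with the
 * carry clear, so "csel" and "csinv" change nothing; "sbcs" then
 * computes X' - limit - 1 == 0, setting Z, and "cset ... ls" returns 1.
 * With size = 9 the subtraction leaves 1 with no borrow, "ls" is false
 * and the range is rejected with 0.
 */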

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)             sign_extend64(addr, 55)

#define access_ok(type, addr, size)     __range_ok((unsigned long)(addr), size)
#define user_addr_max                   get_fs
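
/*
 * Editor's usage sketch (hypothetical values): a TBI pointer such as
 * 0xf800_0000_dead_beef sign-extends from bit 55 to
 * 0x0000_0000_dead_beef, which can then be range-checked as usual:
 *
 *        addr = untagged_addr((u64)uptr);
 *        if (!access_ok(VERIFY_READ, addr, len))
 *                return -EFAULT;
 */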

#define _ASM_EXTABLE(from, to)                                          \
        "       .pushsection    __ex_table, \"a\"\n"                    \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
        void __user *safe_ptr;

        asm volatile(
        "       bics    xzr, %1, %2\n"
        "       csel    %0, %1, xzr, eq\n"
        : "=&r" (safe_ptr)
        : "r" (ptr), "r" (current_thread_info()->addr_limit)
        : "cc");

        csdb();
        return safe_ptr;
}
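
/*
 * Editor's note: "bics xzr, %1, %2" computes ptr & ~addr_limit purely
 * for the flags. Since the limit here has the form 2^n - 1, any pointer
 * bit above the limit leaves Z clear and "csel" substitutes NULL; the
 * csdb() barrier then stops a mispredicted branch from forwarding the
 * unmasked pointer under speculation. For example, with a 48-bit
 * TASK_SIZE_64 the limit is 0x0000ffffffffffff, so 0xffff000000001234
 * masks to NULL while 0x0000000000401000 passes through unchanged.
 */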

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)    \
        asm volatile(                                                   \
        "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
                        alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup, \"ax\"\n"                              \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       mov     %1, #0\n"                                       \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
        _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err), "=&r" (x)                                         \
        : "r" (addr), "i" (-EFAULT))
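
/*
 * Editor's illustration: for the 4-byte case on a UAO-capable CPU the
 * alternative selects the unprivileged load, so after patching the
 * expansion is roughly
 *
 *        1:      ldtr    wX, [addr]      // may fault
 *        2:      ...                     // success path falls through
 *
 * with the fixup at label 3 (err = -EFAULT, x = 0, branch back to 2b)
 * kept out of line and an __ex_table entry pairing 1b with 3b.
 */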

#define __get_user_err(x, ptr, err)                                     \
do {                                                                    \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm("ldr", "ldtr", "%",  __gu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
} while (0)

#define __get_user_check(x, ptr, err)                                   \
({                                                                      \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
                __p = uaccess_mask_ptr(__p);                            \
                __get_user_err((x), __p, (err));                        \
        } else {                                                        \
                (x) = 0; (err) = -EFAULT;                               \
        }                                                               \
})

#define __get_user_error(x, ptr, err)                                   \
({                                                                      \
        __get_user_check((x), (ptr), (err));                            \
        (void)0;                                                        \
})

#define __get_user(x, ptr)                                              \
({                                                                      \
        int __gu_err = 0;                                               \
        __get_user_check((x), (ptr), __gu_err);                         \
        __gu_err;                                                       \
})

#define __get_user_unaligned __get_user

#define get_user        __get_user
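
/*
 * Editor's usage sketch (function and variable names are hypothetical):
 *
 *        int example_read_flag(unsigned int __user *uptr)
 *        {
 *                unsigned int val;
 *
 *                if (get_user(val, uptr))
 *                        return -EFAULT;
 *                return val & 1;
 *        }
 *
 * get_user() performs the access_ok() check itself, returning 0 on
 * success or -EFAULT (with val zeroed) on a bad or faulting address.
 */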

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)    \
        asm volatile(                                                   \
        "1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",                  \
                        alt_instr " " reg "1, [%2]\n", feature)         \
        "2:\n"                                                          \
        "       .section .fixup,\"ax\"\n"                               \
        "       .align  2\n"                                            \
        "3:     mov     %w0, %3\n"                                      \
        "       b       2b\n"                                           \
        "       .previous\n"                                            \
        _ASM_EXTABLE(1b, 3b)                                            \
        : "+r" (err)                                                    \
        : "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)                                     \
do {                                                                    \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),  \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm("str", "sttr", "%w", __pu_val, (ptr),    \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm("str", "sttr", "%", __pu_val, (ptr),     \
                               (err), ARM64_HAS_UAO);                   \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
                        CONFIG_ARM64_PAN));                             \
} while (0)

#define __put_user_check(x, ptr, err)                                   \
({                                                                      \
        __typeof__(*(ptr)) __user *__p = (ptr);                         \
        might_fault();                                                  \
        if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
                __p = uaccess_mask_ptr(__p);                            \
                __put_user_err((x), __p, (err));                        \
        } else {                                                        \
                (err) = -EFAULT;                                        \
        }                                                               \
})

#define __put_user_error(x, ptr, err)                                   \
({                                                                      \
        __put_user_check((x), (ptr), (err));                            \
        (void)0;                                                        \
})

#define __put_user(x, ptr)                                              \
({                                                                      \
        int __pu_err = 0;                                               \
        __put_user_check((x), (ptr), __pu_err);                         \
        __pu_err;                                                       \
})

#define __put_user_unaligned __put_user

#define put_user        __put_user
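
/*
 * Editor's usage sketch (names are hypothetical):
 *
 *        int example_report_count(unsigned long __user *uptr,
 *                                 unsigned long count)
 *        {
 *                return put_user(count, uptr);   // 0 or -EFAULT
 *        }
 */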

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;
        kasan_check_write(to, n);

        if (access_ok(VERIFY_READ, from, n)) {
                check_object_size(to, n, false);
                res = __arch_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
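
/*
 * Editor's usage sketch (struct and names are hypothetical):
 *
 *        struct example_req req;
 *
 *        if (copy_from_user(&req, uarg, sizeof(req)))
 *                return -EFAULT;
 *
 * On a partial fault the memset above zeroes the uncopied tail of the
 * kernel buffer, so callers never see stale kernel data; the return
 * value is the number of bytes left uncopied.
 */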

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);

        if (access_ok(VERIFY_WRITE, to, n)) {
                check_object_size(from, n, true);
                n = __arch_copy_to_user(to, from, n);
        }
        return n;
}

static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
                n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
        return n;
}
#define copy_in_user __copy_in_user

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __arch_clear_user(__uaccess_mask_ptr(to), n);
        return n;
}
#define clear_user      __clear_user
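
/*
 * Editor's usage sketch: zero a user buffer, treating any unwritten
 * remainder as a fault:
 *
 *        if (clear_user(ubuf, len))
 *                return -EFAULT;
 */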

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASM_UACCESS_H */