2 * Based on arch/arm/include/asm/uaccess.h
4 * Copyright (C) 2012 ARM Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #ifndef __ASM_UACCESS_H
19 #define __ASM_UACCESS_H
22 * User space memory access functions
24 #include <linux/bitops.h>
25 #include <linux/kasan-checks.h>
26 #include <linux/string.h>
27 #include <linux/thread_info.h>
29 #include <asm/alternative.h>
30 #include <asm/cpufeature.h>
31 #include <asm/processor.h>
32 #include <asm/ptrace.h>
33 #include <asm/sysreg.h>
34 #include <asm/errno.h>
35 #include <asm/memory.h>
36 #include <asm/compiler.h>
/*
 * Type argument for access_ok(); the check itself ignores it (see
 * the access_ok() definition below).
 * NOTE(review): the companion "#define VERIFY_READ 0" appears to have
 * been lost in extraction — callers below pass VERIFY_READ.
 */
#define VERIFY_WRITE 1
42 * The exception table consists of pairs of relative offsets: the first
43 * is the relative offset to an instruction that is allowed to fault,
44 * and the second is the relative offset at which the program should
45 * continue. No registers are modified, so it is entirely up to the
46 * continuation code to figure out what to do.
48 * All the routines below use bits of fixup code that are out of line
49 * with the main instruction path. This means when everything is well,
50 * we don't even have to jump over them. Further, they do not intrude
51 * on our cache or tlb entries.
/*
 * One exception-table record: a pair of 32-bit self-relative offsets
 * (faulting instruction, fixup continuation) — see the comment block
 * above.
 * NOTE(review): the struct body was lost in extraction — restore the
 * member declarations from the original source.
 */
struct exception_table_entry

#define ARCH_HAS_RELATIVE_EXTABLE

/* Search the exception table for a fixup covering regs->pc. */
extern int fixup_exception(struct pt_regs *regs);
/* The address limit (KERNEL_DS or USER_DS) lives in thread_info. */
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
/*
 * set_fs() - set the current task's address limit (KERNEL_DS or
 * USER_DS) in thread_info.
 *
 * NOTE(review): extraction lost the function braces, the statement
 * following the first comment below (a speculation barrier — confirm
 * exact form against the original), and the "else" branch / closing
 * parenthesis of the final UAO alternative. Restore before building.
 */
static inline void set_fs(mm_segment_t fs)
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
/* True when two mm_segment_t address limits are identical. */
#define segment_eq(a, b) ((a) == (b))
91 * Test whether a block of memory is a valid user space address.
92 * Returns 1 if the range is valid, 0 otherwise.
94 * This is equivalent to the following test:
95 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
/*
 * Branchless range check: returns non-zero iff
 * (u65)addr + (u65)size <= (u65)addr_limit + 1, evaluated entirely in
 * the flags so a mispredicted branch cannot leak an out-of-range
 * access under speculation.
 *
 * NOTE(review): extraction lost the function braces, the
 * "asm volatile(" opener, step 1's "adds %0, %0, %2", the final
 * "cset"/close of the asm and the return statement — restore from
 * the original source.
 */
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
	unsigned long limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	" csel %1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	" csinv %0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	" sbcs xzr, %0, %1\n"
	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
123 * When dealing with data aborts, watchpoints, or instruction traps we may end
124 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
125 * pass on to access_ok(), for instance.
/*
 * Strip a top-byte pointer tag by sign-extending from bit 55, giving
 * a canonical address suitable for access_ok() (see comment above).
 */
#define untagged_addr(addr) sign_extend64(addr, 55)

/* 'type' is ignored; __range_ok() performs the actual limit check. */
#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
/*
 * Emit one relative exception-table entry (faulting insn, fixup),
 * each stored as an offset from the entry itself
 * (ARCH_HAS_RELATIVE_EXTABLE).
 * NOTE(review): the alignment directive and ".popsection" line were
 * lost in extraction — restore from the original source.
 */
#define _ASM_EXTABLE(from, to) \
" .pushsection __ex_table, \"a\"\n" \
" .long (" #from " - .), (" #to " - .)\n" \
139 * Sanitise a uaccess pointer such that it becomes NULL if above the
140 * current addr_limit.
/*
 * Sanitise a uaccess pointer so it becomes NULL when above the
 * current addr_limit — a mispredicted access_ok() then cannot
 * speculatively dereference an out-of-range pointer.
 * The bics/csel pair selects ptr only when (ptr & ~addr_limit) == 0
 * (NB: this treats addr_limit as a mask — true for the power-of-two
 * limits used here; confirm against the original).
 * NOTE(review): extraction lost the function braces, the
 * "asm volatile(" opener, the output constraint for safe_ptr, the
 * "cc" clobber/closer and the trailing csdb()/return — restore from
 * the original source.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
	void __user *safe_ptr;

	" bics xzr, %1, %2\n"
	" csel %0, %1, xzr, eq\n"
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
159 * The "__xxx" versions of the user access functions do not verify the address
160 * space - it must have been done previously with a separate "access_ok()"
163 * The "__xxx_error" versions set the third argument to -EFAULT if an error
164 * occurs, and leave it unchanged on success.
/*
 * Emit one user load into 'x' using 'instr', or the unprivileged
 * 'alt_instr' when the CPU 'feature' (UAO) is present; on a fault the
 * .fixup code sets 'err' to -EFAULT (and, upstream, zeroes 'x' —
 * verify) via the exception-table entry.
 * NOTE(review): extraction lost the "asm volatile(" opener, the "2:"
 * success label, the ".fixup" body with the "3:" label and the
 * closing parenthesis — restore from the original source.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
			alt_instr " " reg "1, [%2]\n", feature) \
	" .section .fixup, \"ax\"\n" \
	_ASM_EXTABLE(1b, 3b) \
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT))
/*
 * Size-dispatched unchecked user load. PAN is disabled around the
 * access on CPUs without UAO (ARM64_ALT_PAN_NOT_UAO) and re-enabled
 * afterwards; with UAO the ldtr* forms in __get_user_asm() do the
 * unprivileged access instead.
 * NOTE(review): extraction lost the "do {" / "} while (0)" wrapper,
 * the case labels/break statements and the default: BUILD_BUG() arm
 * of the switch — restore from the original source.
 */
#define __get_user_err(x, ptr, err) \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN)); \
	switch (sizeof(*(ptr))) { \
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN)); \
/*
 * Checked user load: access_ok() first, then Spectre-mask the pointer
 * with uaccess_mask_ptr() before the access; on a failed check the
 * result is zeroed and err set to -EFAULT.
 * NOTE(review): extraction lost the "({" / "})" wrapper, a
 * might_fault() call, and the "} else {" / closing braces around the
 * two arms — restore from the original source.
 */
#define __get_user_check(x, ptr, err) \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
		__p = uaccess_mask_ptr(__p); \
		__get_user_err((x), __p, (err)); \
		(x) = 0; (err) = -EFAULT; \
/*
 * Public wrappers. __get_user_error() uses a caller-supplied err;
 * __get_user() declares its own __gu_err and evaluates to it.
 * get_user aliases __get_user here because __get_user_check() already
 * performs the access_ok() validation.
 * NOTE(review): the "({" / "})" statement-expression wrappers and the
 * "int __gu_err = 0;" declaration were lost in extraction — restore
 * from the original source.
 */
#define __get_user_error(x, ptr, err) \
	__get_user_check((x), (ptr), (err)); \
#define __get_user(x, ptr) \
	__get_user_check((x), (ptr), __gu_err); \
/* The unaligned variant is the same routine on arm64. */
#define __get_user_unaligned __get_user
#define get_user __get_user
/*
 * Emit one user store of 'x' using 'instr', or the unprivileged
 * 'alt_instr' when the CPU 'feature' (UAO) is present; on a fault the
 * .fixup code sets 'err' to -EFAULT via the exception-table entry.
 * NOTE(review): extraction lost the "asm volatile(" opener, the "2:"
 * success label, the ".fixup" body with the "3:" label, and the
 * output constraint line for 'err' — restore from the original.
 */
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
			alt_instr " " reg "1, [%2]\n", feature) \
	" .section .fixup,\"ax\"\n" \
	_ASM_EXTABLE(1b, 3b) \
	: "r" (x), "r" (addr), "i" (-EFAULT))
/*
 * Size-dispatched unchecked user store, mirroring __get_user_err():
 * PAN disabled around the access on non-UAO CPUs, re-enabled after.
 * NOTE(review): extraction lost the "do {" / "} while (0)" wrapper,
 * the case labels/break statements and the default: BUILD_BUG() arm
 * of the switch — restore from the original source.
 */
#define __put_user_err(x, ptr, err) \
	__typeof__(*(ptr)) __pu_val = (x); \
	__chk_user_ptr(ptr); \
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN)); \
	switch (sizeof(*(ptr))) { \
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
			       (err), ARM64_HAS_UAO); \
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN)); \
/*
 * Checked user store family: access_ok() plus Spectre pointer mask
 * before the store; put_user aliases __put_user because the check is
 * already done in __put_user_check().
 * NOTE(review): extraction lost the "({" / "})" wrappers, a
 * might_fault() call, the "} else {" arm that sets err = -EFAULT,
 * and the "int __pu_err = 0;" declaration — restore from the
 * original source.
 */
#define __put_user_check(x, ptr, err) \
	__typeof__(*(ptr)) __user *__p = (ptr); \
	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
		__p = uaccess_mask_ptr(__p); \
		__put_user_err((x), __p, (err)); \
#define __put_user_error(x, ptr, err) \
	__put_user_check((x), (ptr), (err)); \
#define __put_user(x, ptr) \
	__put_user_check((x), (ptr), __pu_err); \
/* The unaligned variant is the same routine on arm64. */
#define __put_user_unaligned __put_user
#define put_user __put_user
/*
 * Out-of-line assembly bulk-copy primitives. Each returns the number
 * of bytes NOT transferred, 0 on success (see how copy_from_user()
 * below zero-fills the uncopied tail).
 */
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
318 static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
320 kasan_check_write(to, n);
321 check_object_size(to, n, false);
322 return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
325 static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
327 kasan_check_read(from, n);
328 check_object_size(from, n, true);
329 return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
332 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
334 unsigned long res = n;
335 kasan_check_write(to, n);
337 if (access_ok(VERIFY_READ, from, n)) {
338 check_object_size(to, n, false);
339 res = __arch_copy_from_user(to, from, n);
342 memset(to + (n - res), 0, res);
346 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
348 kasan_check_read(from, n);
350 if (access_ok(VERIFY_WRITE, to, n)) {
351 check_object_size(from, n, true);
352 n = __arch_copy_to_user(to, from, n);
357 static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
359 if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
360 n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
363 #define copy_in_user __copy_in_user
/* The inatomic variants alias the unchecked copies on this arch. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
368 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
369 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
371 if (access_ok(VERIFY_WRITE, to, n))
372 n = __arch_clear_user(__uaccess_mask_ptr(to), n);
375 #define clear_user __clear_user
/* Out-of-line user-string helpers (length/termination semantics are
 * the kernel-wide contracts for these functions). */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
382 #endif /* __ASM_UACCESS_H */