#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
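
/*
 * The out-of-line fallbacks declared above are implemented in
 * arch/x86/lib/usercopy_32.c; the inline wrappers below only open-code
 * small constant-size copies and defer everything else to them.
 */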

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that we do not take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin_nospec();
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin_nospec();
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin_nospec();
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			__uaccess_end();
			return ret;
		case 8:
			__uaccess_begin_nospec();
			__put_user_size(*(u64 *)from, (u64 __user *)to,
					8, ret, 8);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
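
/*
 * Illustrative sketch (not part of the original header; "ubuf", "val" and
 * "ret" are invented names): a caller that has already access_ok()-checked
 * and pinned the destination might push one word from atomic context as:
 *
 *	pagefault_disable();
 *	if (__copy_to_user_inatomic(ubuf, &val, sizeof(u32)))
 *		ret = -EFAULT;
 *	pagefault_enable();
 */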

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
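
/*
 * Note that __copy_to_user() differs from __copy_to_user_inatomic() only in
 * the might_fault() annotation: it may take (and sleep on) a page fault, so
 * it must not be used from atomic context or with pagefaults disabled.
 */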

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
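
/*
 * Illustrative sketch (not part of the original header; names are invented):
 * callers typically disable pagefaults around the atomic copy and fall back
 * to a sleeping copy when it comes up short:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kbuf, ubuf, len);
 *	pagefault_enable();
 *	if (left)
 *		... retry with __copy_from_user() outside the atomic section ...
 *
 * Unlike __copy_from_user(), the uncopied tail of the destination is not
 * zero-filled here when the copy faults part way through.
 */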

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	check_object_size(to, n, false);
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
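
/*
 * Illustrative sketch (not part of the original header; "uarg" and
 * "struct foo_args" are invented): copying a fixed-size argument block in a
 * hypothetical ioctl handler:
 *
 *	struct foo_args args;
 *
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(args)))
 *		return -EFAULT;
 *	if (__copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *
 * On a partial failure the uncopied tail of 'args' has already been
 * zero-filled, so 'args' never ends up holding stale kernel data.
 */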

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__uaccess_begin_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			__uaccess_end();
			return ret;
		case 2:
			__uaccess_begin_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			__uaccess_end();
			return ret;
		case 4:
			__uaccess_begin_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			__uaccess_end();
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
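
/*
 * The _nocache variants are intended for larger copies whose destination
 * will not be read again soon; the out-of-line implementation can use
 * non-temporal stores on CPUs that support them to avoid polluting the
 * cache.  For small constant sizes the cacheability hint is ignored and a
 * plain __get_user_size() is used, as above.
 */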

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
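
/*
 * This _inatomic_nocache variant does no small-constant special casing and,
 * like __copy_from_user_inatomic(), does not zero the uncopied tail; callers
 * must cope with a short copy themselves.
 */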

#endif /* _ASM_X86_UACCESS_32_H */