
powerpc: Use barrier_nospec in copy_from_user()
author Michael Ellerman <mpe@ellerman.id.au>
Sun, 21 Apr 2019 14:20:15 +0000 (00:20 +1000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 16 May 2019 17:44:48 +0000 (19:44 +0200)
commit ddf35cf3764b5a182b178105f57515b42e2634f8 upstream.

Based on the x86 commit doing the same.

See commit 304ec1b05031 ("x86/uaccess: Use __uaccess_begin_nospec()
and uaccess_try_nospec") and b3bbfb3fb5d2 ("x86: Introduce
__uaccess_begin_nospec() and uaccess_try_nospec") for more detail.

In all cases we are ordering the load from the potentially
user-controlled pointer vs a previous branch based on an access_ok()
check or similar.
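
For reference, the pattern being applied is that the barrier sits between
the access_ok() branch and the dependent user load, so a mispredicted
branch cannot speculatively dereference the user pointer. A minimal
sketch for illustration only (not part of this patch; read_user_byte()
is a made-up helper, while access_ok(), barrier_nospec() and __get_user()
are the kernel interfaces used in the diff below):

	static inline int read_user_byte(u8 *dst, const u8 __user *from)
	{
		if (!access_ok(VERIFY_READ, from, sizeof(*dst)))
			return -EFAULT;

		/* Order the user load after the access_ok() branch. */
		barrier_nospec();

		return __get_user(*dst, from);
	}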

Based on a patch from Michal Suchanek.

Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/powerpc/include/asm/uaccess.h

index 05f1389..e51ce5a 100644
@@ -269,6 +269,7 @@ do {                                                                \
        __chk_user_ptr(ptr);                                    \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
+       barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
@@ -283,6 +284,7 @@ do {                                                                \
        __chk_user_ptr(ptr);                                    \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
+       barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
@@ -295,8 +297,10 @@ do {                                                               \
        unsigned long  __gu_val = 0;                                    \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        might_fault();                                                  \
-       if (access_ok(VERIFY_READ, __gu_addr, (size)))                  \
+       if (access_ok(VERIFY_READ, __gu_addr, (size))) {                \
+               barrier_nospec();                                       \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
 })
@@ -307,6 +311,7 @@ do {                                                                \
        unsigned long __gu_val;                                 \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
        __chk_user_ptr(ptr);                                    \
+       barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
@@ -323,8 +328,10 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
-       if (likely(access_ok(VERIFY_READ, from, n)))
+       if (likely(access_ok(VERIFY_READ, from, n))) {
+               barrier_nospec();
                return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
        memset(to, 0, n);
        return n;
 }
@@ -359,21 +366,27 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 
                switch (n) {
                case 1:
+                       barrier_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
+                       barrier_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
+                       barrier_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
+                       barrier_nospec();
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
+
+       barrier_nospec();
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -400,6 +413,7 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }