OSDN Git Service

arm64: uaccess: Remove uaccess_*_not_uao asm macros
Author: Pavel Tatashin <pasha.tatashin@soleen.com>
Wed, 20 Nov 2019 17:07:40 +0000 (12:07 -0500)
Committer: Will Deacon <will@kernel.org>
Wed, 20 Nov 2019 18:51:54 +0000 (18:51 +0000)
It is safer and simpler to drop the uaccess assembly macros in favour of
inline C functions. Although this bloats the Image size slightly, it
aligns our user copy routines with '{get,put}_user()' and generally
makes the code a lot easier to reason about.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
[will: tweaked commit message and changed temporary variable names]
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/asm-uaccess.h
arch/arm64/include/asm/uaccess.h
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/lib/uaccess_flushcache.c

index 5bf9638..c764cc8 100644 (file)
@@ -59,23 +59,6 @@ alternative_else_nop_endif
 #endif
 
 /*
- * These macros are no-ops when UAO is present.
- */
-       .macro  uaccess_disable_not_uao, tmp1, tmp2
-       uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
-       SET_PSTATE_PAN(1)
-alternative_else_nop_endif
-       .endm
-
-       .macro  uaccess_enable_not_uao, tmp1, tmp2, tmp3
-       uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
-       SET_PSTATE_PAN(0)
-alternative_else_nop_endif
-       .endm
-
-/*
  * Remove the address tag from a virtual address, if present.
  */
        .macro  untagged_addr, dst, addr
index 097d6bf..127712b 100644 (file)
@@ -378,20 +378,34 @@ do {                                                                      \
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n)                                        \
 ({                                                                     \
-       __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));     \
+       unsigned long __acfu_ret;                                       \
+       uaccess_enable_not_uao();                                       \
+       __acfu_ret = __arch_copy_from_user((to),                        \
+                                     __uaccess_mask_ptr(from), (n));   \
+       uaccess_disable_not_uao();                                      \
+       __acfu_ret;                                                     \
 })
 
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define raw_copy_to_user(to, from, n)                                  \
 ({                                                                     \
-       __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));       \
+       unsigned long __actu_ret;                                       \
+       uaccess_enable_not_uao();                                       \
+       __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),        \
+                                   (from), (n));                       \
+       uaccess_disable_not_uao();                                      \
+       __actu_ret;                                                     \
 })
 
 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 #define raw_copy_in_user(to, from, n)                                  \
 ({                                                                     \
-       __arch_copy_in_user(__uaccess_mask_ptr(to),                     \
-                           __uaccess_mask_ptr(from), (n));             \
+       unsigned long __aciu_ret;                                       \
+       uaccess_enable_not_uao();                                       \
+       __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),        \
+                                   __uaccess_mask_ptr(from), (n));     \
+       uaccess_disable_not_uao();                                      \
+       __aciu_ret;                                                     \
 })
 
 #define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-       if (access_ok(to, n))
+       if (access_ok(to, n)) {
+               uaccess_enable_not_uao();
                n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+               uaccess_disable_not_uao();
+       }
        return n;
 }
 #define clear_user     __clear_user
index 322b556..aeafc03 100644 (file)
@@ -20,7 +20,6 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__arch_clear_user)
-       uaccess_enable_not_uao x2, x3, x4
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
        b.mi    5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:     mov     x0, #0
-       uaccess_disable_not_uao x2, x3
        ret
 ENDPROC(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
@@ -48,6 +46,5 @@ EXPORT_SYMBOL(__arch_clear_user)
        .section .fixup,"ax"
        .align  2
 9:     mov     x0, x2                  // return the original size
-       uaccess_disable_not_uao x2, x3
        ret
        .previous
index 8472dc7..ebb3c06 100644 (file)
 
 end    .req    x5
 ENTRY(__arch_copy_from_user)
-       uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
-       uaccess_disable_not_uao x3, x4
        mov     x0, #0                          // Nothing to copy
        ret
 ENDPROC(__arch_copy_from_user)
@@ -66,6 +64,5 @@ EXPORT_SYMBOL(__arch_copy_from_user)
        .section .fixup,"ax"
        .align  2
 9998:  sub     x0, end, dst                    // bytes not copied
-       uaccess_disable_not_uao x3, x4
        ret
        .previous
index 8e0355c..3d8153a 100644 (file)
 end    .req    x5
 
 ENTRY(__arch_copy_in_user)
-       uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
-       uaccess_disable_not_uao x3, x4
        mov     x0, #0
        ret
 ENDPROC(__arch_copy_in_user)
@@ -68,6 +66,5 @@ EXPORT_SYMBOL(__arch_copy_in_user)
        .section .fixup,"ax"
        .align  2
 9998:  sub     x0, end, dst                    // bytes not copied
-       uaccess_disable_not_uao x3, x4
        ret
        .previous
index 6085214..357eae2 100644 (file)
 
 end    .req    x5
 ENTRY(__arch_copy_to_user)
-       uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
-       uaccess_disable_not_uao x3, x4
        mov     x0, #0
        ret
 ENDPROC(__arch_copy_to_user)
@@ -65,6 +63,5 @@ EXPORT_SYMBOL(__arch_copy_to_user)
        .section .fixup,"ax"
        .align  2
 9998:  sub     x0, end, dst                    // bytes not copied
-       uaccess_disable_not_uao x3, x4
        ret
        .previous
index cbfcbe6..bfa30b7 100644 (file)
@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
                                     unsigned long n)
 {
-       unsigned long rc = __arch_copy_from_user(to, from, n);
+       unsigned long rc;
+
+       uaccess_enable_not_uao();
+       rc = __arch_copy_from_user(to, from, n);
+       uaccess_disable_not_uao();
 
        /* See above */
        __clean_dcache_area_pop(to, n - rc);