Merge tag 'v4.9.95' into android-4.9.95
author    Greg Hackmann <ghackmann@google.com>  Fri, 20 Apr 2018 17:06:49 +0000 (10:06 -0700)
committer Greg Hackmann <ghackmann@google.com>  Fri, 20 Apr 2018 17:06:49 +0000 (10:06 -0700)
This is the 4.9.95 stable release

Change-Id: I7b8c0b5f4ea5afaddbf9b77813efe675332bced0
Signed-off-by: Greg Hackmann <ghackmann@google.com>
18 files changed:
Makefile
arch/arm64/Kconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/mm/context.c
arch/arm64/mm/fault.c
fs/namei.c
kernel/events/core.c

diff --cc Makefile
Simple merge
diff --cc arch/arm64/Kconfig
Simple merge
diff --cc arch/arm64/include/asm/assembler.h
Simple merge
diff --cc arch/arm64/include/asm/cpucaps.h
  #define ARM64_HAS_32BIT_EL0                   13
  #define ARM64_HYP_OFFSET_LOW                  14
  #define ARM64_MISMATCHED_CACHE_LINE_SIZE      15
 -#define ARM64_UNMAP_KERNEL_AT_EL0             16
 -#define ARM64_HARDEN_BRANCH_PREDICTOR         17
  
 -#define ARM64_NCAPS                           18
 +#define ARM64_UNMAP_KERNEL_AT_EL0             23
++#define ARM64_HARDEN_BRANCH_PREDICTOR         24
 +
- #define ARM64_NCAPS                           24
++#define ARM64_NCAPS                           25
  
  #endif /* __ASM_CPUCAPS_H */
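
The conflict above is pure renumbering. Upstream 4.9.95 assigned the two new capabilities numbers 16 and 17, but the android-4.9 tree had already backported ARM64_UNMAP_KERNEL_AT_EL0 as 23 (with ARM64_NCAPS at 24), so the merge keeps 23, adds ARM64_HARDEN_BRANCH_PREDICTOR at 24, and raises ARM64_NCAPS to 25. NCAPS matters because capability numbers index a fixed-size bitmap; a standalone C sketch (not kernel code) of that sizing constraint:

/* Standalone sketch: capability numbers index a fixed-size bitmap,
 * so ARM64_NCAPS must stay strictly greater than the highest number. */
#include <stdbool.h>

#define ARM64_UNMAP_KERNEL_AT_EL0     23
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_NCAPS                   25

static unsigned long cpu_hwcaps[(ARM64_NCAPS + 63) / 64];

/* mirrors the shape of the kernel's cpus_have_cap() helper */
static bool cap_set(unsigned int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return cpu_hwcaps[num / 64] & (1UL << (num % 64));
}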
diff --cc arch/arm64/include/asm/futex.h
@@@ -41,14 -44,14 +41,14 @@@ do {
  "     .popsection\n"                                                  \
        _ASM_EXTABLE(1b, 4b)                                            \
        _ASM_EXTABLE(2b, 4b)                                            \
 -      ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,            \
 -                  CONFIG_ARM64_PAN)                                   \
        : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)       \
        : "r" (oparg), "Ir" (-EFAULT)                                   \
 -      : "memory")
 +      : "memory");                                                    \
 +      uaccess_disable();                                              \
 +} while (0)
  
  static inline int
- futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+ futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *_uaddr)
  {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@@ -111,12 -115,14 +112,14 @@@ futex_atomic_cmpxchg_inatomic(u32 *uval
  {
        int ret = 0;
        u32 val, tmp;
+       u32 __user *uaddr;
  
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+       if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
                return -EFAULT;
  
+       uaddr = __uaccess_mask_ptr(_uaddr);
 +      uaccess_enable();
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
 -ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
  "     prfm    pstl1strm, %2\n"
  "1:   ldxr    %w1, %2\n"
  "     sub     %w3, %w1, %w4\n"
diff --cc arch/arm64/include/asm/kvm_mmu.h
Simple merge
diff --cc arch/arm64/include/asm/memory.h
Simple merge
diff --cc arch/arm64/include/asm/mmu.h
Simple merge
diff --cc arch/arm64/include/asm/sysreg.h
Simple merge
diff --cc arch/arm64/include/asm/uaccess.h
  #include <linux/string.h>
  #include <linux/thread_info.h>
  
 -#include <asm/alternative.h>
  #include <asm/cpufeature.h>
+ #include <asm/processor.h>
  #include <asm/ptrace.h>
 -#include <asm/sysreg.h>
  #include <asm/errno.h>
  #include <asm/memory.h>
  #include <asm/compiler.h>
@@@ -126,115 -136,26 +141,135 @@@ static inline unsigned long __range_ok(
        "       .popsection\n"
  
  /*
 + * User access enabling/disabling.
 + */
 +#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 +static inline void __uaccess_ttbr0_disable(void)
 +{
 +      unsigned long flags, ttbr;
 +
 +      local_irq_save(flags);
 +      ttbr = read_sysreg(ttbr1_el1);
 +      ttbr &= ~TTBR_ASID_MASK;
 +      /* reserved_ttbr0 placed at the end of swapper_pg_dir */
 +      write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
 +      isb();
 +      /* Set reserved ASID */
 +      write_sysreg(ttbr, ttbr1_el1);
 +      isb();
 +      local_irq_restore(flags);
 +}
 +
 +static inline void __uaccess_ttbr0_enable(void)
 +{
 +      unsigned long flags, ttbr0, ttbr1;
 +
 +      /*
 +       * Disable interrupts to avoid preemption between reading the 'ttbr0'
 +       * variable and the MSR. A context switch could trigger an ASID
 +       * roll-over and an update of 'ttbr0'.
 +       */
 +      local_irq_save(flags);
 +      ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
 +
 +      /* Restore active ASID */
 +      ttbr1 = read_sysreg(ttbr1_el1);
 +      ttbr1 &= ~TTBR_ASID_MASK;               /* safety measure */
 +      ttbr1 |= ttbr0 & TTBR_ASID_MASK;
 +      write_sysreg(ttbr1, ttbr1_el1);
 +      isb();
 +
 +      /* Restore user page table */
 +      write_sysreg(ttbr0, ttbr0_el1);
 +      isb();
 +      local_irq_restore(flags);
 +}
 +
 +static inline bool uaccess_ttbr0_disable(void)
 +{
 +      if (!system_uses_ttbr0_pan())
 +              return false;
 +      __uaccess_ttbr0_disable();
 +      return true;
 +}
 +
 +static inline bool uaccess_ttbr0_enable(void)
 +{
 +      if (!system_uses_ttbr0_pan())
 +              return false;
 +      __uaccess_ttbr0_enable();
 +      return true;
 +}
 +#else
 +static inline bool uaccess_ttbr0_disable(void)
 +{
 +      return false;
 +}
 +
 +static inline bool uaccess_ttbr0_enable(void)
 +{
 +      return false;
 +}
 +#endif
 +
 +#define __uaccess_disable(alt)                                                \
 +do {                                                                  \
 +      if (!uaccess_ttbr0_disable())                                   \
 +              asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,          \
 +                              CONFIG_ARM64_PAN));                     \
 +} while (0)
 +
 +#define __uaccess_enable(alt)                                         \
 +do {                                                                  \
 +      if (!uaccess_ttbr0_enable())                                    \
 +              asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,          \
 +                              CONFIG_ARM64_PAN));                     \
 +} while (0)
 +
 +static inline void uaccess_disable(void)
 +{
 +      __uaccess_disable(ARM64_HAS_PAN);
 +}
 +
 +static inline void uaccess_enable(void)
 +{
 +      __uaccess_enable(ARM64_HAS_PAN);
 +}
 +
 +/*
 + * These functions are no-ops when UAO is present.
 + */
 +static inline void uaccess_disable_not_uao(void)
 +{
 +      __uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
 +}
 +
 +static inline void uaccess_enable_not_uao(void)
 +{
 +      __uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
 +}
 +
 +/*
+  * Sanitise a uaccess pointer such that it becomes NULL if above the
+  * current addr_limit.
+  */
+ #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+ {
+       void __user *safe_ptr;
+       asm volatile(
+       "       bics    xzr, %1, %2\n"
+       "       csel    %0, %1, xzr, eq\n"
+       : "=&r" (safe_ptr)
+       : "r" (ptr), "r" (current_thread_info()->addr_limit)
+       : "cc");
+       csdb();
+       return safe_ptr;
+ }
+ /*
   * The "__xxx" versions of the user access functions do not verify the address
   * space - it must have been done previously with a separate "access_ok()"
   * call.
@@@ -282,15 -204,21 +317,20 @@@ do {                                                                    
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
 +      uaccess_disable_not_uao();                                      \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 -      asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
 -                      CONFIG_ARM64_PAN));                             \
  } while (0)
  
- #define __get_user(x, ptr)                                            \
+ #define __get_user_check(x, ptr, err)                                 \
  ({                                                                    \
-       int __gu_err = 0;                                               \
-       __get_user_err((x), (ptr), __gu_err);                           \
-       __gu_err;                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
+               __p = uaccess_mask_ptr(__p);                            \
+               __get_user_err((x), __p, (err));                        \
+       } else {                                                        \
+               (x) = 0; (err) = -EFAULT;                               \
+       }                                                               \
  })
  
  #define __get_user_error(x, ptr, err)                                 \
@@@ -349,14 -278,20 +389,19 @@@ do {                                                                    
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
 -      asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
 -                      CONFIG_ARM64_PAN));                             \
 +      uaccess_disable_not_uao();                                      \
  } while (0)
  
- #define __put_user(x, ptr)                                            \
+ #define __put_user_check(x, ptr, err)                                 \
  ({                                                                    \
-       int __pu_err = 0;                                               \
-       __put_user_err((x), (ptr), __pu_err);                           \
-       __pu_err;                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
+               __p = uaccess_mask_ptr(__p);                            \
+               __put_user_err((x), __p, (err));                        \
+       } else  {                                                       \
+               (err) = -EFAULT;                                        \
+       }                                                               \
  })
  
  #define __put_user_error(x, ptr, err)                                 \
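
The heart of the uaccess.h change is __uaccess_mask_ptr(), through which __get_user_check() and __put_user_check() above now funnel every user pointer. It works because this backport defines addr_limit values as inclusive masks of the form 2^n - 1 (USER_DS; KERNEL_DS is all ones, so kernel-space limits pass everything): bics tests ptr & ~addr_limit in one instruction, csel replaces an out-of-range pointer with NULL without a conditional branch the CPU could mispredict, and csdb() fences value speculation behind the select. A C restatement of the computation, as a sketch only, since plain C cannot guarantee the branch-free csel/csdb sequence, which is precisely why the kernel uses inline asm:

/* Sketch of what the bics/csel pair computes; assumes limit is an
 * inclusive mask such as USER_DS = TASK_SIZE_64 - 1. */
static inline const void *mask_user_ptr(const void *ptr, unsigned long limit)
{
	unsigned long p = (unsigned long)ptr;

	/* (p & ~limit) == 0 iff no address bit exceeds the limit mask,
	 * i.e. p <= limit.  A compiler may lower this ternary to a
	 * predicted branch; the asm version may not. */
	return (p & ~limit) == 0 ? ptr : NULL;
}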
diff --cc arch/arm64/kernel/cpufeature.c
Simple merge
diff --cc arch/arm64/kernel/entry.S
  #include <asm/irq.h>
  #include <asm/memory.h>
  #include <asm/mmu.h>
+ #include <asm/processor.h>
 +#include <asm/ptrace.h>
  #include <asm/thread_info.h>
 +#include <asm/uaccess.h>
  #include <asm/asm-uaccess.h>
  #include <asm/unistd.h>
  #include <asm/kernel-pgtable.h>
diff --cc arch/arm64/lib/clear_user.S
@@@ -29,8 -32,9 +29,8 @@@
   *
   * Alignment fixed up by hardware.
   */
- ENTRY(__clear_user)
+ ENTRY(__arch_clear_user)
 -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 -          CONFIG_ARM64_PAN)
 +      uaccess_enable_not_uao x2, x3, x4
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
@@@ -50,9 -54,10 +50,9 @@@ uao_user_alternative 9f, strh, sttrh, w
        b.mi    5f
  uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
  5:    mov     x0, #0
 -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 -          CONFIG_ARM64_PAN)
 +      uaccess_disable_not_uao x2, x3
        ret
- ENDPROC(__clear_user)
+ ENDPROC(__arch_clear_user)
  
        .section .fixup,"ax"
        .align  2
diff --cc arch/arm64/lib/copy_in_user.S
        .endm
  
  end   .req    x5
- ENTRY(__copy_in_user)
+ ENTRY(__arch_copy_in_user)
 -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 -          CONFIG_ARM64_PAN)
 +      uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
  #include "copy_template.S"
 -ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 -          CONFIG_ARM64_PAN)
 +      uaccess_disable_not_uao x3, x4
        mov     x0, #0
        ret
- ENDPROC(__copy_in_user)
+ ENDPROC(__arch_copy_in_user)
  
        .section .fixup,"ax"
        .align  2
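
Both library routines are renamed for the same reason (__clear_user to __arch_clear_user, __copy_in_user to __arch_copy_in_user): the assembly keeps the raw entry point while the old names become C-level wrappers that mask their pointers first. Going by the upstream pointer-masking series, the wrappers look roughly like this; a sketch, not the exact backported definitions:

/* Sketch of the wrappers the renames enable; the point is the
 * unconditional __uaccess_mask_ptr() on every user pointer. */
extern unsigned long __arch_clear_user(void __user *to, unsigned long n);

#define __clear_user(to, n) \
	__arch_clear_user(__uaccess_mask_ptr(to), (n))

extern unsigned long __arch_copy_in_user(void __user *to,
					 const void __user *from,
					 unsigned long n);

#define __copy_in_user(to, from, n)			\
	__arch_copy_in_user(__uaccess_mask_ptr(to),	\
			    __uaccess_mask_ptr(from), (n))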
diff --cc arch/arm64/mm/context.c
@@@ -230,12 -230,10 +230,15 @@@ void check_and_switch_context(struct mm
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
  
  switch_mm_fastpath:
 -      cpu_switch_mm(mm->pgd, mm);
+       arm64_apply_bp_hardening();
 +      /*
 +       * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
 +       * emulating PAN.
 +       */
 +      if (!system_uses_ttbr0_pan())
 +              cpu_switch_mm(mm->pgd, mm);
  }
  
  /* Errata workaround post TTBRx_EL1 update. */
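
Two things happen on the fast path after this merge: arm64_apply_bp_hardening() invalidates branch-predictor state before the incoming task can run, and when SW TTBR0 PAN is active the TTBR0_EL1 load is skipped entirely. In that configuration the new pgd is instead stashed per-thread and only installed by uaccess_enable(); the stashing helper lives in mmu_context.h and, hedged as a from-memory sketch of this backport, has roughly this shape:

/* Sketch: stash the pgd (with the ASID in the upper bits) in
 * thread_info; __uaccess_ttbr0_enable() reads it back later. */
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	if (system_uses_ttbr0_pan())
		task_thread_info(tsk)->ttbr0 =
			virt_to_phys(mm->pgd) | (u64)ASID(mm) << 48;
}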
diff --cc arch/arm64/mm/fault.c
@@@ -338,7 -332,7 +338,7 @@@ static int __kprobes do_page_fault(unsi
                mm_flags |= FAULT_FLAG_WRITE;
        }
  
-       if (addr < USER_DS && is_permission_fault(esr, regs)) {
 -      if (is_permission_fault(esr) && (addr < TASK_SIZE)) {
++      if (addr < TASK_SIZE && is_permission_fault(esr, regs)) {
                /* regs->orig_addr_limit may be 0 if we entered from EL0 */
                if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
diff --cc fs/namei.c
Simple merge
diff --cc kernel/events/core.c
Simple merge