Merge tag 'v4.9.95' into android-4.9.95
[android-x86/kernel.git] arch/arm64/kernel/entry.S
index e10e9f2..ef3b9a0 100644
@@ -31,7 +31,9 @@
 #include <asm/memory.h>
 #include <asm/mmu.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess.h>
 #include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 #include <asm/kernel-pgtable.h>
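The two added includes supply definitions used later in this diff: USER_DS (asm/uaccess.h) replaces the open-coded TASK_SIZE_64 in kernel_entry, and PSR_PAN_BIT (asm/ptrace.h) backs the software PAN emulation. For reference, the v4.9-era definitions are approximately the following (a sketch; check the headers in this tree):

	/* asm/uaccess.h: segment limits stored in thread_info->addr_limit */
	#define KERNEL_DS	(-1UL)		/* kernel: no limit */
	#define USER_DS		TASK_SIZE_64	/* user: EL0 task VA limit */

	/* asm/ptrace.h: PSTATE.PAN image in the saved SPSR (bit 22) */
	#define PSR_PAN_BIT	0x00400000
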
@@ -126,16 +128,42 @@ alternative_else_nop_endif
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_thread_info tsk
-       /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+       /* Save the task's original addr_limit and set USER_DS */
        ldr     x20, [tsk, #TI_ADDR_LIMIT]
        str     x20, [sp, #S_ORIG_ADDR_LIMIT]
-       mov     x20, #TASK_SIZE_64
+       mov     x20, #USER_DS
        str     x20, [tsk, #TI_ADDR_LIMIT]
        /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
        .endif /* \el == 0 */
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+        * EL0, there is no need to check the state of TTBR0_EL1 since
+        * accesses are always enabled.
+        * Note that the meaning of this bit differs from the ARMv8.1 PAN
+        * feature as all TTBR0_EL1 accesses are disabled, not just those to
+        * user mappings.
+        */
+alternative_if ARM64_HAS_PAN
+       b       1f                              // skip TTBR0 PAN
+alternative_else_nop_endif
+
+       .if     \el != 0
+       mrs     x21, ttbr0_el1
+       tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
+       orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
+       b.eq    1f                              // TTBR0 access already disabled
+       and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
+       .endif
+
+       __uaccess_ttbr0_disable x21
+1:
+#endif
+
        stp     x22, x23, [sp, #S_PC]
 
        /*
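The entry-side SW PAN logic above records the emulated PAN state in the saved SPSR (the reserved ASID, 0, in TTBR0_EL1 means user access was already disabled, hence the tst/b.eq pair) and then switches TTBR0_EL1 to a reserved, all-invalid table. A sketch of __uaccess_ttbr0_disable in the shape it takes once KPTI is merged in; names and offsets follow the mainline version and may differ slightly in this 4.9 backport:

	.macro	__uaccess_ttbr0_disable, tmp
	mrs	\tmp, ttbr1_el1			// swapper_pg_dir + current ASID
	bic	\tmp, \tmp, #TTBR_ASID_MASK	// drop the task ASID (use ASID 0)
	sub	\tmp, \tmp, #RESERVED_TTBR0_SIZE // reserved_ttbr0 precedes swapper_pg_dir
	msr	ttbr0_el1, \tmp			// any user access now faults
	isb
	add	\tmp, \tmp, #RESERVED_TTBR0_SIZE
	msr	ttbr1_el1, \tmp			// run under the reserved ASID
	isb
	.endm
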
@@ -174,6 +202,40 @@ alternative_else_nop_endif
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
+       .endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       /*
+        * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+        * PAN bit checking.
+        */
+alternative_if ARM64_HAS_PAN
+       b       2f                              // skip TTBR0 PAN
+alternative_else_nop_endif
+
+       .if     \el != 0
+       tbnz    x22, #22, 1f                    // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+       .endif
+
+       __uaccess_ttbr0_enable x0, x1
+
+       .if     \el == 0
+       /*
+        * Enable errata workarounds only if returning to user. The only
+        * workaround currently required for TTBR0_EL1 changes is for the
+        * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+        * corruption).
+        */
+       bl      post_ttbr_update_workaround
+       .endif
+1:
+       .if     \el != 0
+       and     x22, x22, #~PSR_PAN_BIT         // ARMv8.0 CPUs do not understand this bit
+       .endif
+2:
+#endif
+
+       .if     \el == 0
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23
        tst     x22, #PSR_MODE32_BIT            // native task?
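On the exit side, __uaccess_ttbr0_enable is the inverse: it reloads the TTBR0 value (user page tables plus ASID) stashed in thread_info and splices the task ASID back into TTBR1_EL1; post_ttbr_update_workaround is a C helper applying the Cavium 27456 workaround described in the comment. A sketch, assuming the thread_info field is named TI_TTBR0 in this backport (mainline calls it TSK_TI_TTBR0):

	.macro	__uaccess_ttbr0_enable, tmp1, tmp2
	get_thread_info \tmp1
	ldr	\tmp1, [\tmp1, #TI_TTBR0]	// saved user baddr + ASID
	mrs	\tmp2, ttbr1_el1
	extr	\tmp2, \tmp2, \tmp1, #48	// tmp2 = (ttbr1 << 16) | task ASID
	ror	\tmp2, \tmp2, #16		// rotate the ASID up into [63:48]
	msr	ttbr1_el1, \tmp2		// restore the task ASID
	isb
	msr	ttbr0_el1, \tmp1		// restore the user page tables
	isb
	.endm
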
@@ -191,6 +253,7 @@ alternative_else_nop_endif
 #endif
 3:
        .endif
+
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        ldp     x0, x1, [sp, #16 * 0]
@@ -227,10 +290,6 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
        .endif
        .endm
 
-       .macro  get_thread_info, rd
-       mrs     \rd, sp_el0
-       .endm
-
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
 
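get_thread_info itself is not deleted: this series moves the macro, unchanged, into asm/assembler.h so the uaccess helpers above can use it outside entry.S. Its body is the one removed here:

	.macro	get_thread_info, rd
	mrs	\rd, sp_el0		// sp_el0 holds the current thread_info
	.endm
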
@@ -589,13 +648,15 @@ el0_ia:
         * Instruction abort handling
         */
        mrs     x26, far_el1
-       // enable interrupts before calling the main handler
-       enable_dbg_and_irq
+       msr     daifclr, #(8 | 4 | 1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
-       bl      do_mem_abort
+       bl      do_el0_ia_bp_hardening
        b       ret_to_user
 el0_fpsimd_acc:
        /*
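The open-coded daifclr write is the point of this hunk: with PSTATE.DAIF bits D=8, A=4, I=2, F=1, the old enable_dbg_and_irq unmasked debug and IRQ, while #(8 | 4 | 1) unmasks debug, SError and FIQ but deliberately keeps IRQs masked until do_el0_ia_bp_hardening has applied branch-predictor hardening for kernel-address instruction aborts. For comparison, assuming the v4.9 macro definition:

	.macro	enable_dbg_and_irq		// old path: D and I cleared, IRQs on
	msr	daifclr, #(8 | 2)
	.endm

	msr	daifclr, #(8 | 4 | 1)		// new path: D, A, F cleared; I stays
						// masked across the hardening call
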
@@ -622,8 +683,10 @@ el0_sp_pc:
         * Stack or PC alignment exception handling
         */
        mrs     x26, far_el1
-       // enable interrupts before calling the main handler
-       enable_dbg_and_irq
+       enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
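el0_sp_pc gets the same treatment with enable_dbg, which unmasks debug exceptions only, so the alignment-fault path now also runs with IRQs masked; in v4.9's asm/assembler.h it is simply:

	.macro	enable_dbg
	msr	daifclr, #8		// unmask debug exceptions, nothing else
	.endm
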
@@ -682,6 +745,11 @@ el0_irq_naked:
 #endif
 
        ct_user_exit
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+       tbz     x22, #55, 1f
+       bl      do_el0_irq_bp_hardening
+1:
+#endif
        irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
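Bit 55 of a virtual address selects the translation regime (0 = TTBR0/user, 1 = TTBR1/kernel), so this is a cheap "does the interrupted PC look like a kernel address?" test. Annotated, with x22 holding the ELR_EL1 value saved by kernel_entry:

	tbz	x22, #55, 1f		// user-range PC: nothing to do
	bl	do_el0_irq_bp_hardening	// kernel-looking PC while in EL0 is
1:					// suspicious: invalidate the predictor
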
@@ -795,6 +863,7 @@ el0_svc_naked:                                      // compat entry point
        b.ne    __sys_trace
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    ni_sys
+       mask_nospec64 scno, sc_nr, x19  // enforce bounds for syscall number
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        blr     x16                             // call sys_* routine
        b       ret_fast_syscall
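mask_nospec64 is the assembly counterpart of array_index_nospec(): even if the b.hs bound check is bypassed speculatively, the syscall number is clamped to zero before indexing the table, with no branch to mispredict. The macro as added to asm/assembler.h elsewhere in this series (quoted from the upstream 4.9 backport; verify against this tree):

	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit		// negative iff idx < limit
	bic	\tmp, \tmp, \idx		// force mask to 0 if idx has bit 63 set
	and	\idx, \idx, \tmp, asr #63	// idx &= (in-bounds ? ~0 : 0)
	csdb					// data-value speculation barrier
	.endm
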
@@ -851,14 +920,24 @@ __ni_sys_trace:
 
        .macro tramp_map_kernel, tmp
        mrs     \tmp, ttbr1_el1
-       sub     \tmp, \tmp, #SWAPPER_DIR_SIZE
+       sub     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
        bic     \tmp, \tmp, #USER_ASID_FLAG
        msr     ttbr1_el1, \tmp
+#ifdef CONFIG_ARCH_MSM8996
+       /* ASID already in \tmp[63:48] */
+       movk    \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+       movk    \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+       /* 2MB boundary containing the vectors, so we nobble the walk cache */
+       movk    \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+       isb
+       tlbi    vae1, \tmp
+       dsb     nsh
+#endif /* CONFIG_ARCH_MSM8996 */
        .endm
 
        .macro tramp_unmap_kernel, tmp
        mrs     \tmp, ttbr1_el1
-       add     \tmp, \tmp, #SWAPPER_DIR_SIZE
+       add     \tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
        orr     \tmp, \tmp, #USER_ASID_FLAG
        msr     ttbr1_el1, \tmp
        /*
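The CONFIG_ARCH_MSM8996 block works around a Kryo/Falkor-style TLB erratum; this 4.9 backport reuses the MSM8996 platform option rather than a dedicated erratum config. After moving TTBR1_EL1, a stale walk-cache entry for the trampoline may survive, so the macro builds a tlbi vae1 operand in place, whose format is ASID in [63:48] and VA[55:12] in the low bits. Annotated:

	// after the msr ttbr1_el1 above: \tmp holds the new ASID in [63:48]
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)	// page-number bits [47:32]
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)	// page-number bits [31:16]
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12) // [15:0], 2MB-aligned
	tlbi	vae1, \tmp		// evict the walk-cache entry for the
	dsb	nsh			// 2MB block containing the vectors

The _nc (no-check) relocations matter here: they patch one 16-bit field each without disturbing the ASID already sitting in the top halfword.
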
@@ -885,7 +964,9 @@ __ni_sys_trace:
        tramp_map_kernel        x30
 #ifdef CONFIG_RANDOMIZE_BASE
        adr     x30, tramp_vectors + PAGE_SIZE
+#ifndef CONFIG_ARCH_MSM8996
        isb
+#endif
        ldr     x30, [x30]
 #else
        ldr     x30, =vectors
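In the tramp_ventry path above, the KASLR case reads the real vector address from a literal placed one page past tramp_vectors, so no kernel VA is embedded in the user-mapped trampoline text; the isb ordering the ttbr1_el1 switch against that load is dropped on MSM8996 builds because tramp_map_kernel already executed an isb on that path. Annotated:

	tramp_map_kernel	x30		// ttbr1 -> swapper + kernel ASID
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE	// literal pool page by the vectors
#ifndef CONFIG_ARCH_MSM8996
	isb					// order the TTBR switch before the
#endif						// load (MSM8996: isb already issued)
	ldr	x30, [x30]			// randomized address of vectors
#else
	ldr	x30, =vectors			// no KASLR: link-time address
#endif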