
Revert "FROMLIST: arm64: Disable TTBR0_EL1 during normal kernel execution"
author     Sami Tolvanen <samitolvanen@google.com>    Wed, 14 Dec 2016 20:32:25 +0000 (12:32 -0800)
committer  Sami Tolvanen <samitolvanen@google.com>    Wed, 4 Jan 2017 17:02:08 +0000 (09:02 -0800)
This reverts commit 5775ca34829caf0664c8ccc02fd0e93cb6022e0f.

Bug: 31432001
Change-Id: I9b07c2f01bc2bcfed51f60ab487034639f5e1960
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/ptrace.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/setup.c
arch/arm64/mm/context.c
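
The mechanism being reverted deferred user page-table switches: on a context switch, TTBR0_EL1 was pointed at a reserved (empty) page table and the task's real pgd was stashed in thread_info.ttbr0, to be installed only around explicit uaccess windows. After this revert, switch_mm() programs TTBR0_EL1 directly again. Below is a minimal user-space C model of that difference; the types, addresses, and main() harness are illustrative stand-ins, not kernel code, though the helper names mirror those in the diff that follows.

/*
 * User-space model of the TTBR0_EL1 handling this commit reverts.
 * All types, addresses, and the main() harness are illustrative
 * stand-ins; only the helper names mirror the kernel diff below.
 */
#include <stdio.h>

typedef unsigned long phys_addr_t;

struct mm {
	phys_addr_t pgd;		/* root of this mm's page tables */
};

#define RESERVED_TTBR0	0x0UL		/* empty table: user accesses fault */

static phys_addr_t hw_ttbr0;		/* models the TTBR0_EL1 register */
static phys_addr_t saved_ttbr0;		/* models thread_info.ttbr0 */

/* Behavior this commit restores: install the user pgd on every switch. */
static void switch_mm_after_revert(struct mm *next)
{
	hw_ttbr0 = next->pgd;		/* cpu_switch_mm(mm->pgd, mm) */
}

/*
 * Behavior being reverted: park TTBR0_EL1 on the reserved table and
 * stash the real pgd, to be installed only around uaccess windows.
 */
static void switch_mm_with_sw_pan(struct mm *next)
{
	hw_ttbr0 = RESERVED_TTBR0;	/* cpu_set_reserved_ttbr0() */
	saved_ttbr0 = next->pgd;	/* update_saved_ttbr0(current, mm) */
}

static void uaccess_enable(void)  { hw_ttbr0 = saved_ttbr0; }
static void uaccess_disable(void) { hw_ttbr0 = RESERVED_TTBR0; }

int main(void)
{
	struct mm task = { .pgd = 0x40001000UL };	/* arbitrary value */

	switch_mm_with_sw_pan(&task);
	printf("SW PAN:   TTBR0 after switch_mm = %#lx (reserved)\n", hw_ttbr0);
	uaccess_enable();
	printf("SW PAN:   TTBR0 during uaccess  = %#lx (user pgd)\n", hw_ttbr0);
	uaccess_disable();

	switch_mm_after_revert(&task);
	printf("Reverted: TTBR0 after switch_mm = %#lx (user pgd)\n", hw_ttbr0);
	return 0;
}

With the revert, kernel code can again dereference user addresses at any time, which is why the uaccess_ttbr0_enable/uaccess_ttbr0_disable paths and the emulated PAN bit tracking in SPSR disappear from entry.S below.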

diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 932f5a5..8e88a69 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
-#include <asm/cpufeature.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
@@ -70,30 +69,7 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
-       __switch_mm(mm);
-
-       if (system_uses_ttbr0_pan()) {
-               if (mm != current->active_mm) {
-                       /*
-                        * Update the current thread's saved ttbr0 since it is
-                        * restored as part of a return from exception. Set
-                        * the hardware TTBR0_EL1 using cpu_switch_mm()
-                        * directly to enable potential errata workarounds.
-                        */
-                       update_saved_ttbr0(current, mm);
-                       cpu_switch_mm(mm->pgd, mm);
-               } else {
-                       /*
-                        * Defer the switch to the current thread's TTBR0_EL1
-                        * until uaccess_enable(). Restore the current
-                        * thread's saved ttbr0 corresponding to its active_mm
-                        * (if different from init_mm).
-                        */
-                       cpu_set_reserved_ttbr0();
-                       if (current->active_mm != &init_mm)
-                               update_saved_ttbr0(current, current->active_mm);
-               }
-       }
+       switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 4a32fd5..a00f7cf 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,7 +23,6 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -114,7 +113,7 @@ static inline void cpu_uninstall_idmap(void)
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 
-       if (mm != &init_mm && !system_uses_ttbr0_pan())
+       if (mm != &init_mm)
                cpu_switch_mm(mm->pgd, mm);
 }
 
@@ -174,27 +173,21 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-static inline void update_saved_ttbr0(struct task_struct *tsk,
-                                     struct mm_struct *mm)
-{
-       if (system_uses_ttbr0_pan()) {
-               BUG_ON(mm->pgd == swapper_pg_dir);
-               task_thread_info(tsk)->ttbr0 =
-                       virt_to_phys(mm->pgd) | ASID(mm) << 48;
-       }
-}
-#else
-static inline void update_saved_ttbr0(struct task_struct *tsk,
-                                     struct mm_struct *mm)
-{
-}
-#endif
-
-static inline void __switch_mm(struct mm_struct *next)
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned.  No registers are touched.  We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+         struct task_struct *tsk)
 {
        unsigned int cpu = smp_processor_id();
 
+       if (prev == next)
+               return;
+
        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -207,23 +200,7 @@ static inline void __switch_mm(struct mm_struct *next)
        check_and_switch_context(next, cpu);
 }
 
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-         struct task_struct *tsk)
-{
-       if (prev != next)
-               __switch_mm(next);
-
-       /*
-        * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
-        * value may have not been initialised yet (activate_mm caller) or the
-        * ASID has changed since the last run (following the context switch
-        * of another thread of the same process).
-        */
-       update_saved_ttbr0(tsk, next);
-}
-
 #define deactivate_mm(tsk,mm)  do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, current)
+#define activate_mm(prev,next) switch_mm(prev, next, NULL)
 
 #endif
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index dd4257e..200d5c3 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,8 +21,6 @@
 
 #include <uapi/asm/ptrace.h>
 
-#define _PSR_PAN_BIT           22
-
 /* Current Exception Level values, as contained in CurrentEL */
 #define CurrentEL_EL1          (1 << 2)
 #define CurrentEL_EL2          (2 << 2)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index eb185c9..8eb8eb0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,9 +29,7 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
-#include <asm/ptrace.h>
 #include <asm/thread_info.h>
-#include <asm/uaccess.h>
 #include <asm/unistd.h>
 
 /*
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-       /*
-        * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
-        * EL0, there is no need to check the state of TTBR0_EL1 since
-        * accesses are always enabled.
-        * Note that the meaning of this bit differs from the ARMv8.1 PAN
-        * feature as all TTBR0_EL1 accesses are disabled, not just those to
-        * user mappings.
-        */
-alternative_if_not ARM64_HAS_PAN
-       nop
-alternative_else
-       b       1f                              // skip TTBR0 PAN
-alternative_endif
-
-       .if     \el != 0
-       mrs     x21, ttbr0_el1
-       tst     x21, #0xffff << 48              // Check for the reserved ASID
-       orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
-       b.eq    1f                              // TTBR0 access already disabled
-       and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
-       .endif
-
-       uaccess_ttbr0_disable x21
-1:
-#endif
-
        stp     x22, x23, [sp, #S_PC]
 
        /*
@@ -177,42 +147,6 @@ alternative_endif
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
-       .endif
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-       /*
-        * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
-        * PAN bit checking.
-        */
-alternative_if_not ARM64_HAS_PAN
-       nop
-alternative_else
-       b       2f                              // skip TTBR0 PAN
-alternative_endif
-
-       .if     \el != 0
-       tbnz    x22, #_PSR_PAN_BIT, 1f          // Skip re-enabling TTBR0 access if previously disabled
-       .endif
-
-       uaccess_ttbr0_enable x0
-
-       .if     \el == 0
-       /*
-        * Enable errata workarounds only if returning to user. The only
-        * workaround currently required for TTBR0_EL1 changes are for the
-        * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
-        * corruption).
-        */
-       post_ttbr0_update_workaround
-       .endif
-1:
-       .if     \el != 0
-       and     x22, x22, #~PSR_PAN_BIT         // ARMv8.0 CPUs do not understand this bit
-       .endif
-2:
-#endif
-
-       .if     \el == 0
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23
 #ifdef CONFIG_ARM64_ERRATUM_845719
@@ -234,7 +168,6 @@ alternative_else
 alternative_endif
 #endif
        .endif
-
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        ldp     x0, x1, [sp, #16 * 0]
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6591bf2..29b8c24 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -347,15 +347,6 @@ void __init setup_arch(char **cmdline_p)
        smp_init_cpus();
        smp_build_mpidr_hash();
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-       /*
-        * Make sure init_thread_info.ttbr0 always generates translation
-        * faults in case uaccess_enable() is inadvertently called by the init
-        * thread.
-        */
-       init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
-#endif
-
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 2512808..7275628 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -182,12 +182,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
-       /*
-        * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
-        * emulating PAN.
-        */
-       if (!system_uses_ttbr0_pan())
-               cpu_switch_mm(mm->pgd, mm);
+       cpu_switch_mm(mm->pgd, mm);
 }
 
 static int asids_init(void)