
arm64: Remove Spectre-related CONFIG_* options
author		Will Deacon <will@kernel.org>	Tue, 15 Sep 2020 21:11:13 +0000 (22:11 +0100)
committer	Will Deacon <will@kernel.org>	Tue, 29 Sep 2020 15:08:15 +0000 (16:08 +0100)

The spectre mitigations are too configurable for their own good, leading
to confusing logic trying to figure out when we should mitigate and when
we shouldn't. Although the plethora of command-line options needs to stick
around for backwards compatibility, the default-on CONFIG options that
depend on EXPERT can be dropped, as the mitigations only do anything if
the system is vulnerable, a mitigation is available and the command-line
hasn't disabled it.
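
That gate is worth spelling out. A minimal sketch of the runtime logic, assuming
hypothetical helpers system_is_vulnerable() and mitigation_available() in place
of the per-erratum detection and firmware probing actually done in cpu_errata.c
(cpu_mitigations_off() and the __nospectre_v2 flag are the real checks, visible
in the diff below):

	#include <linux/cpu.h>		/* cpu_mitigations_off() */

	/* Hypothetical stand-ins for the real per-erratum detection and
	 * firmware probing done in cpu_errata.c: */
	extern bool system_is_vulnerable(void);
	extern bool mitigation_available(void);
	extern bool __nospectre_v2;	/* set by the "nospectre_v2" cmdline option */

	static bool want_mitigation(void)
	{
		if (!system_is_vulnerable())	/* CPU not affected */
			return false;
		if (!mitigation_available())	/* no CPU/firmware support */
			return false;
		if (__nospectre_v2 || cpu_mitigations_off())
			return false;		/* disabled on the command line */
		return true;
	}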

Remove CONFIG_HARDEN_BRANCH_PREDICTOR and CONFIG_ARM64_SSBD in favour of
enabling this code unconditionally.

Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/Kconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/mmu.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kvm/Kconfig
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/include/hyp/switch.h

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6d23283..5125927 100644
@@ -1165,32 +1165,6 @@ config UNMAP_KERNEL_AT_EL0
 
          If unsure, say Y.
 
-config HARDEN_BRANCH_PREDICTOR
-       bool "Harden the branch predictor against aliasing attacks" if EXPERT
-       default y
-       help
-         Speculation attacks against some high-performance processors rely on
-         being able to manipulate the branch predictor for a victim context by
-         executing aliasing branches in the attacker context.  Such attacks
-         can be partially mitigated against by clearing internal branch
-         predictor state and limiting the prediction logic in some situations.
-
-         This config option will take CPU-specific actions to harden the
-         branch predictor against aliasing attacks and may rely on specific
-         instruction sequences or control bits being set by the system
-         firmware.
-
-         If unsure, say Y.
-
-config ARM64_SSBD
-       bool "Speculative Store Bypass Disable" if EXPERT
-       default y
-       help
-         This enables mitigation of the bypassing of previous stores
-         by speculative loads.
-
-         If unsure, say Y.
-
 config RODATA_FULL_DEFAULT_ENABLED
        bool "Apply r/o permissions of VM areas also to their linear aliases"
        default y
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 89b4f01..851d144 100644
@@ -712,12 +712,8 @@ int get_spectre_v2_workaround_state(void);
 
 static inline int arm64_get_ssbd_state(void)
 {
-#ifdef CONFIG_ARM64_SSBD
        extern int ssbd_state;
        return ssbd_state;
-#else
-       return ARM64_SSBD_UNKNOWN;
-#endif
 }
 
 void arm64_set_ssbd_mitigation(bool state);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 189839c..1df85a3 100644
@@ -527,7 +527,6 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
-#ifdef CONFIG_ARM64_SSBD
 DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 static inline int hyp_map_aux_data(void)
@@ -544,12 +543,6 @@ static inline int hyp_map_aux_data(void)
        }
        return 0;
 }
-#else
-static inline int hyp_map_aux_data(void)
-{
-       return 0;
-}
-#endif
 
 #define kvm_phys_to_vttbr(addr)                phys_to_ttbr(addr)
 
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index a7a5eca..f5e3efe 100644
@@ -45,7 +45,6 @@ struct bp_hardening_data {
        bp_hardening_cb_t       fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
@@ -64,14 +63,6 @@ static inline void arm64_apply_bp_hardening(void)
        if (d->fn)
                d->fn();
 }
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-       return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)      { }
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 extern void arm64_memblock_init(void);
 extern void paging_init(void);
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index a561cbb..ed8799b 100644
@@ -19,7 +19,7 @@ obj-y                 := debug-monitors.o entry.o irq.o fpsimd.o              \
                           return_address.o cpuinfo.o cpu_errata.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
                           smp.o smp_spin_table.o topology.o smccc-call.o       \
-                          syscall.o
+                          ssbd.o syscall.o
 
 targets                        += efi-entry.o
 
@@ -59,7 +59,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 obj-$(CONFIG_CRASH_DUMP)               += crash_dump.o
 obj-$(CONFIG_CRASH_CORE)               += crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)                += sdei.o
-obj-$(CONFIG_ARM64_SSBD)               += ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)           += pointer_auth.o
 obj-$(CONFIG_SHADOW_CALL_STACK)                += scs.o
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index abfef5f..dd91039 100644
@@ -254,9 +254,7 @@ static int detect_harden_bp_fw(void)
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;
 
-       if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
-               install_bp_hardening_cb(cb, smccc_start, smccc_end);
-
+       install_bp_hardening_cb(cb, smccc_start, smccc_end);
        return 1;
 }
 
@@ -335,11 +333,6 @@ void arm64_set_ssbd_mitigation(bool state)
 {
        int conduit;
 
-       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
-               pr_info_once("SSBD disabled by kernel configuration\n");
-               return;
-       }
-
        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
@@ -584,12 +577,6 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
 
        __spectrev2_safe = false;
 
-       if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
-               pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
-               __hardenbp_enab = false;
-               return false;
-       }
-
        /* forced off */
        if (__nospectre_v2 || cpu_mitigations_off()) {
                pr_info_once("spectrev2 mitigation disabled by command line option\n");
@@ -1004,9 +991,7 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev,
        switch (ssbd_state) {
        case ARM64_SSBD_KERNEL:
        case ARM64_SSBD_FORCE_ENABLE:
-               if (IS_ENABLED(CONFIG_ARM64_SSBD))
-                       return sprintf(buf,
-                           "Mitigation: Speculative Store Bypass disabled via prctl\n");
+               return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
        }
 
        return sprintf(buf, "Vulnerable\n");
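
The "disabled via prctl" string above refers to the per-task speculation control
implemented by ssbd.o, which the Makefile hunk above now builds unconditionally.
As a usage sketch only (the PR_* constants are the standard ones from
linux/prctl.h), a task requests the mitigation like this:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Ask for Speculative Store Bypass to be disabled for this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
		return 0;
	}

Whether the request succeeds now depends only on the runtime ssbd_state, not on
any CONFIG option.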
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6424584..4bb45b1 100644
@@ -1583,7 +1583,6 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
        WARN_ON(val & (7 << 27 | 7 << 21));
 }
 
-#ifdef CONFIG_ARM64_SSBD
 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
 {
        if (user_mode(regs))
@@ -1623,7 +1622,6 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
                arm64_set_ssbd_mitigation(true);
        }
 }
-#endif /* CONFIG_ARM64_SSBD */
 
 #ifdef CONFIG_ARM64_PAN
 static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
@@ -1976,7 +1974,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
                .min_field_value = 1,
        },
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypassing Safe (SSBS)",
                .capability = ARM64_SSBS,
@@ -1988,7 +1985,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
                .cpu_enable = cpu_enable_ssbs,
        },
-#endif
 #ifdef CONFIG_ARM64_CNP
        {
                .desc = "Common not Private translations",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 55af8b5..81b7093 100644
@@ -132,7 +132,6 @@ alternative_else_nop_endif
         * them if required.
         */
        .macro  apply_ssbd, state, tmp1, tmp2
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
        b       .L__asm_ssbd_skip\@
 alternative_cb_end
@@ -146,7 +145,6 @@ alternative_cb      arm64_update_smccc_conduit
        nop                                     // Patched to SMC/HVC #0
 alternative_cb_end
 .L__asm_ssbd_skip\@:
-#endif
        .endm
 
        .macro  kernel_entry, el, regsize = 64
@@ -697,11 +695,9 @@ el0_irq_naked:
        bl      trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        tbz     x22, #55, 1f
        bl      do_el0_irq_bp_hardening
 1:
-#endif
        irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 318c8f2..42e5895 100644
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
          virtual machines.
 
 config KVM_INDIRECT_VECTORS
-       def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
+       def_bool RANDOMIZE_BASE
 
 endif # KVM
 
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 46b4dab..41698ba 100644
@@ -116,7 +116,6 @@ el1_hvc_guest:
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap
 
-#ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
        b       wa2_end
 alternative_cb_end
@@ -143,7 +142,6 @@ alternative_cb_end
 wa2_end:
        mov     x2, xzr
        mov     x1, xzr
-#endif
 
 wa_epilogue:
        mov     x0, xzr
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 5b6b8fa..b503f19 100644
@@ -489,7 +489,6 @@ static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
 
 static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_ARM64_SSBD
        /*
         * The host runs with the workaround always present. If the
         * guest wants it disabled, so be it...
@@ -497,19 +496,16 @@ static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
 }
 
 static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 {
-#ifdef CONFIG_ARM64_SSBD
        /*
         * If the guest has disabled the workaround, bring it back on.
         */
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
 }
 
 static inline void __kvm_unexpected_el2_exception(void)