x86/cpu: Cleanup the untrain mess
author		Peter Zijlstra <peterz@infradead.org>
		Mon, 14 Aug 2023 11:44:34 +0000 (13:44 +0200)
committer	Borislav Petkov (AMD) <bp@alien8.de>
		Wed, 16 Aug 2023 19:58:59 +0000 (21:58 +0200)
Since there can only be one active return_thunk, there only needs to be
one (matching) untrain_ret. It fundamentally doesn't make sense to
allow multiple untrain_ret at the same time.

Fold all three untrain methods into a single (temporary) helper stub.

Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org
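
As a rough, self-contained C sketch of what the new entry_untrain_ret stub
selects at runtime (a hypothetical model for illustration only; the real stub
is the alternatives-patched assembly added to arch/x86/lib/retpoline.S below,
and the feat_srso/feat_srso_alias booleans merely stand in for the
X86_FEATURE_SRSO and X86_FEATURE_SRSO_ALIAS cpufeature bits):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the X86_FEATURE_SRSO{,_ALIAS} cpufeature bits (hypothetical). */
static bool feat_srso, feat_srso_alias;

/* Stand-ins for the three real untrain routines. */
static void retbleed_untrain_ret(void)   { puts("retbleed_untrain_ret"); }
static void srso_untrain_ret(void)       { puts("srso_untrain_ret"); }
static void srso_alias_untrain_ret(void) { puts("srso_alias_untrain_ret"); }

/*
 * Model of entry_untrain_ret: exactly one untrain routine runs.  Later
 * ALTERNATIVE_2 entries take precedence, so SRSO_ALIAS wins over SRSO,
 * which wins over the default retbleed path.
 */
static void entry_untrain_ret(void)
{
	if (feat_srso_alias)
		srso_alias_untrain_ret();
	else if (feat_srso)
		srso_untrain_ret();
	else
		retbleed_untrain_ret();
}

int main(void)
{
	feat_srso = true;	/* pretend the SRSO (non-alias) mitigation is active */
	entry_untrain_ret();	/* prints "srso_untrain_ret" */
	return 0;
}

This mirrors the precedence encoded by the ALTERNATIVE_2 in the retpoline.S
hunk: the plain retbleed_untrain_ret jump is the default and is overridden
when the SRSO or SRSO_ALIAS feature bit is set.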
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/lib/retpoline.S

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f7c3375..5285c8e 100644
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET   "call retbleed_untrain_ret"
+#define CALL_UNTRAIN_RET       "call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET   ""
+#define CALL_UNTRAIN_RET       ""
 #endif
 
 /*
        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
        VALIDATE_UNRET_END
        ALTERNATIVE_3 "",                                               \
-                     CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,          \
+                     CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,        \
                      __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-       ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-                         "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 .macro UNTRAIN_RET_FROM_CALL
        defined(CONFIG_CALL_DEPTH_TRACKING)
        VALIDATE_UNRET_END
        ALTERNATIVE_3 "",                                               \
-                     CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,          \
+                     CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
                      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB,        \
                      __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-       ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-                         "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm
 
 
@@ -355,6 +345,7 @@ extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_alias_untrain_ret(void);
 
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
 extern void (*x86_return_thunk)(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bbbbda9..6f3e195 100644
@@ -2460,6 +2460,7 @@ static void __init srso_select_mitigation(void)
                         * like ftrace, static_call, etc.
                         */
                        setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+                       setup_force_cpu_cap(X86_FEATURE_UNRET);
 
                        if (boot_cpu_data.x86 == 0x19) {
                                setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index d37e5ab..5e85da1 100644
@@ -289,6 +289,13 @@ SYM_CODE_START(srso_return_thunk)
        ud2
 SYM_CODE_END(srso_return_thunk)
 
+SYM_FUNC_START(entry_untrain_ret)
+       ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+                     "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+                     "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+SYM_FUNC_END(entry_untrain_ret)
+__EXPORT_THUNK(entry_untrain_ret)
+
 SYM_CODE_START(__x86_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR