/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);
/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
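
/*
 * For orientation, the architectural bits in MSR_IA32_SPEC_CTRL that this
 * file juggles (the SPEC_CTRL_* constants live in <asm/msr-index.h>):
 *
 *	SPEC_CTRL_IBRS	(1 << 0)  Indirect Branch Restricted Speculation
 *	SPEC_CTRL_STIBP	(1 << 1)  Single Thread Indirect Branch Predictors
 *	SPEC_CTRL_SSBD	(1 << 2)  Speculative Store Bypass Disable
 *
 * x86_spec_ctrl_base holds the bits that every write of the MSR must
 * preserve; x86_spec_ctrl_mask holds the bits a guest may modify.
 */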
void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it is not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
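
/*
 * Intended call pattern (an illustrative sketch; see KVM's SVM vcpu run
 * path for the real caller): the hypervisor installs the guest's view of
 * the speculation MSRs before entering the guest and restores the host's
 * view afterwards, e.g.
 *
 *	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, true);
 *	... run the guest ...
 *	x86_virt_spec_ctrl(svm->spec_ctrl, svm->virt_spec_ctrl, false);
 *
 * The "svm->" field names above are illustrative; only the true/false
 * setguest pairing is dictated by this function.
 */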

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
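
/*
 * Note on the LS_CFG path: the SSBD disable bit in MSR_AMD64_LS_CFG is not
 * architecturally enumerated and sits at a family-dependent position (the
 * early AMD init code computes x86_amd_ls_cfg_ssbd_mask per family, e.g.
 * bit 10 on family 0x17). OR-ing the cached base value with the mask
 * therefore rebuilds the full register content for the running family.
 */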

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
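
/*
 * Example: for arg = "retpoline" (arglen = 9), the option "retpoline,amd"
 * has strlen() 13, so the length test fails and the entry is skipped; a
 * bare strncmp() over 9 bytes alone would treat the two as equal.
 */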

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] = {
	{ "off",               SPECTRE_V2_CMD_NONE,              false },
	{ "on",                SPECTRE_V2_CMD_FORCE,             true  },
	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret, i;
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	if (mitigation_options[i].secure)
		spec2_print_if_secure(mitigation_options[i].option);
	else
		spec2_print_if_insecure(mitigation_options[i].option);

	return cmd;
}

static bool stibp_needed(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE)
		return false;

	/* Enhanced IBRS makes using STIBP unnecessary. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return false;

	if (!boot_cpu_has(X86_FEATURE_STIBP))
		return false;

	return true;
}

static void update_stibp_msr(void *info)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

void arch_smt_update(void)
{
	u64 mask;

	if (!stibp_needed())
		return;

	mutex_lock(&spec_ctrl_mutex);

	mask = x86_spec_ctrl_base;
	if (cpu_smt_control == CPU_SMT_ENABLED)
		mask |= SPEC_CTRL_STIBP;
	else
		mask &= ~SPEC_CTRL_STIBP;

	if (mask != x86_spec_ctrl_base) {
		pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
			cpu_smt_control == CPU_SMT_ENABLED ?
			"Enabling" : "Disabling");
		x86_spec_ctrl_base = mask;
		on_each_cpu(update_stibp_msr, NULL, 1);
	}
	mutex_unlock(&spec_ctrl_mutex);
}
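
/*
 * STIBP only matters while sibling hyperthreads share a branch predictor,
 * so arch_smt_update() is invoked from the SMT control code whenever SMT
 * is switched on or off. The bit is folded into x86_spec_ctrl_base and
 * pushed to every online CPU via the IPI above, so subsequent MSR writes
 * (context switches, VM exits) all restore a consistent value.
 */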

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
	}

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Enable STIBP if appropriate */
	arch_smt_update();
}
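
/*
 * For reference, the thunk that the X86_FEATURE_RETPOLINE alternatives
 * redirect indirect branches through looks roughly like this (generic
 * flavour, target in %r11; see arch/x86/lib/retpoline.S for the real code):
 *
 *	call	1f
 * 2:	pause
 *	lfence
 *	jmp	2b		// speculation trap
 * 1:	mov	%r11, (%rsp)	// overwrite the return address
 *	ret			// "return" to the real target
 *
 * The AMD flavour (X86_FEATURE_RETPOLINE_AMD) can use "lfence; jmp *%r11"
 * instead, because LFENCE is serializing on those CPUs.
 */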

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",    SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",      SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",     SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",   SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	bool update;

	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	default:
		return -ERANGE;
	}

	/*
	 * If being set on non-current task, delay setting the CPU
	 * mitigation until it is next scheduled.
	 */
	if (task == current && update)
		speculation_ctrl_update_current();

	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
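
/*
 * Userspace view of this interface (illustrative; the constants come from
 * <linux/prctl.h>): a task opts itself into the SSB mitigation with
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * and queries the state with
 *
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * which returns the PR_SPEC_PRCTL | PR_SPEC_* value assembled in
 * ssb_prctl_get() below, or a negative error code.
 */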

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
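
/*
 * What X86_FEATURE_L1TF_PTEINV buys: when a PTE is made non-present, the
 * kernel inverts the physical address bits stored in it, so a speculative
 * L1TF read through the stale entry targets a high, unpopulated physical
 * address instead of real memory. Simplified, for a page at physical
 * address "paddr":
 *
 *	pte = (~paddr) & PHYSICAL_PAGE_MASK;	// with the present bit clear
 *
 * which is why the check above insists that no RAM lives above MAX_PA/2:
 * the inverted addresses must land outside cacheable memory.
 */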

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char *l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     cpu_smt_control == CPU_SMT_ENABLED))
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	if (x86_spec_ctrl_base & SPEC_CTRL_STIBP)
		return ", STIBP";
	else
		return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_USE_IBPB))
		return ", IBPB";
	else
		return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;
	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif
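
/*
 * These hooks back /sys/devices/system/cpu/vulnerabilities/. Example of
 * the strings assembled above (output varies with CPU and configuration):
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *	Mitigation: Full generic retpoline, IBPB, IBRS_FW
 */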