
x86/speculation: Remove unnecessary ret variable in cpu_show_common()
arch/x86/kernel/cpu/bugs.c (uclinux-h8/linux.git)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

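/*
 * Boot-time entry point for bug/mitigation handling. check_bugs() is called
 * from start_kernel(); it identifies the boot CPU and then selects all
 * speculation mitigations below before alternatives are patched and before
 * any user space runs.
 */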
void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology_early();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for a i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slow downs.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

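/*
 * Reconcile the host and guest speculation-control state around VM
 * entry/exit (KVM reaches this through the x86_spec_ctrl_set_guest() and
 * x86_spec_ctrl_restore_host() wrappers in <asm/spec-ctrl.h>).
 * MSR_IA32_SPEC_CTRL is only rewritten when the host and guest values
 * actually differ; SSBD state that is not expressed in SPEC_CTRL is
 * propagated through the AMD LS_CFG/VIRT_SPEC_CTRL path further down.
 */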
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and or the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculative_store_bypass_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

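/*
 * Module loading hook: when the kernel itself relies on retpolines,
 * retpoline_module_ok() lets the module loader flag modules that were built
 * without retpoline support, and the sysfs Spectre v2 report appends
 * " - vulnerable module loaded" once that has happened.
 */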
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

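/*
 * Command line parsing for Spectre v2. Examples of accepted parameters
 * (taken from the mitigation_options[] table above):
 *
 *      spectre_v2=off
 *      spectre_v2=retpoline,generic
 *      nospectre_v2            (equivalent to spectre_v2=off)
 *
 * Anything unrecognised falls back to SPECTRE_V2_CMD_AUTO.
 */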
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

static bool stibp_needed(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE)
                return false;

        if (!boot_cpu_has(X86_FEATURE_STIBP))
                return false;

        return true;
}

static void update_stibp_msr(void *info)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

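/*
 * Called by the core SMT control code whenever the SMT state changes:
 * toggle STIBP in the base SPEC_CTRL value and push the new value to all
 * online CPUs via update_stibp_msr().
 */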
void arch_smt_update(void)
{
        u64 mask;

        if (!stibp_needed())
                return;

        mutex_lock(&spec_ctrl_mutex);
        mask = x86_spec_ctrl_base;
        if (cpu_smt_control == CPU_SMT_ENABLED)
                mask |= SPEC_CTRL_STIBP;
        else
                mask &= ~SPEC_CTRL_STIBP;

        if (mask != x86_spec_ctrl_base) {
                pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
                                cpu_smt_control == CPU_SMT_ENABLED ?
                                "Enabling" : "Disabling");
                x86_spec_ctrl_base = mask;
                on_each_cpu(update_stibp_msr, NULL, 1);
        }
        mutex_unlock(&spec_ctrl_mutex);
}

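/*
 * Spectre v2 mitigation selection. In AUTO/FORCE mode Enhanced IBRS is
 * preferred when the CPU advertises it; otherwise a retpoline flavour is
 * chosen: the AMD/Hygon variant when LFENCE is serializing, the generic
 * one everywhere else.
 */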
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *      - RSB underflow (and switch to BTB) on Skylake+
         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so, enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line, and if
         * the CPU supports Enhanced IBRS the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Enable STIBP if appropriate */
        arch_smt_update();
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

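/*
 * Command line parsing for Speculative Store Bypass, e.g.:
 *
 *      spec_store_bypass_disable=seccomp
 *      nospec_store_bypass_disable
 *
 * Unknown values fall back to SPEC_STORE_BYPASS_CMD_AUTO.
 */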
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

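/*
 * Map the command line selection to an operating mode: "auto" and "seccomp"
 * resolve to the prctl+seccomp mode when CONFIG_SECCOMP is enabled (prctl
 * only otherwise), "on" disables Speculative Store Bypass globally, and the
 * global disable is programmed either through MSR_IA32_SPEC_CTRL or through
 * the AMD-specific path in x86_amd_ssb_disable().
 */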
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

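/*
 * Per-task control of the SSB mitigation, reached from user space through
 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, ...) and, when the
 * seccomp mode is active, applied automatically to seccomp-filtered tasks.
 * The chosen state is tracked in the task's TIF_SSBD flag and written to
 * the MSRs on context switch.
 */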
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        bool update;

        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        default:
                return -ERANGE;
        }

        /*
         * If being set on non-current task, delay setting the CPU
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
                speculative_store_bypass_update_current();

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in
 * the cache, but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G RAM installed,
 * the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible, since
 * the cache uses 44 bits internally. Use the cache bits instead of the
 * reported physical bits and adjust them on the affected machines to 44
 * if the reported value is less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

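/*
 * L1TF mitigation selection. PTE inversion needs a physical address bit
 * above any RAM that speculation could reach: if installed memory extends
 * beyond half of the addressable space (MAX_PA/2), inverted PTEs could
 * still point at real memory, hence the e820 range check and warning below.
 */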
static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                                half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

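/*
 * "l1tf=" boot parameter, matching the switch() in l1tf_select_mitigation():
 * accepted values are off, flush, flush,nowarn, flush,nosmt, full and
 * full,force.
 */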
static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char *l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]                = "auto",
        [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             cpu_smt_control == CPU_SMT_ENABLED))
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

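/*
 * Backing store for the sysfs vulnerability reports under
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1,
 * spectre_v2, spec_store_bypass, l1tf). Each file maps to one
 * cpu_show_*() wrapper below, all funnelled through cpu_show_common().
 */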
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;
        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif