arch/x86/kernel/cpu/bugs.c (android-x86/kernel.git)
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

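/*
 * Boot-time entry point for CPU bug handling: identify the boot CPU,
 * snapshot the SPEC_CTRL base value, and select the Spectre v2, SSB and
 * L1TF mitigations before alternatives are patched.
 */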
void __init check_bugs(void)
{
        identify_boot_cpu();

        /*
         * identify_boot_cpu() initialized SMT support information, let the
         * core code know.
         */
        cpu_smt_check_topology_early();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

        l1tf_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * to be very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

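/*
 * Reconcile host and guest speculation control state around VM entry and
 * exit. @setguest selects whether the guest or the host values get written.
 * Handles both MSR_IA32_SPEC_CTRL and the AMD LS_CFG/VIRT_SPEC_CTRL based
 * SSBD controls.
 */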
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and or the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculation_ctrl_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

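/*
 * Engage Speculative Store Bypass Disable on AMD parts that lack SPEC_CTRL
 * based SSBD, using either the virtualized SSBD MSR or the family specific
 * LS_CFG MSR bit.
 */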
static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

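/* Parse the spectre_v2= / nospectre_v2 kernel command line options. */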
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;

        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
        if (ret < 0)
                return SPECTRE_V2_CMD_AUTO;

        for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                if (!match_option(arg, ret, mitigation_options[i].option))
                        continue;
                cmd = mitigation_options[i].cmd;
                break;
        }

        if (i >= ARRAY_SIZE(mitigation_options)) {
                pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                return SPECTRE_V2_CMD_AUTO;
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

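/*
 * STIBP is only useful when a Spectre v2 mitigation other than enhanced
 * IBRS is active and the CPU actually supports it.
 */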
static bool stibp_needed(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE)
                return false;

        /* Enhanced IBRS makes using STIBP unnecessary. */
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return false;

        if (!boot_cpu_has(X86_FEATURE_STIBP))
                return false;

        return true;
}

static void update_stibp_msr(void *info)
{
        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

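/*
 * Called when the SMT state changes: toggle STIBP in the SPEC_CTRL base
 * value and propagate the new value to all online CPUs.
 */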
void arch_smt_update(void)
{
        u64 mask;

        if (!stibp_needed())
                return;

        mutex_lock(&spec_ctrl_mutex);
        mask = x86_spec_ctrl_base;
        if (cpu_smt_control == CPU_SMT_ENABLED)
                mask |= SPEC_CTRL_STIBP;
        else
                mask &= ~SPEC_CTRL_STIBP;

        if (mask != x86_spec_ctrl_base) {
                pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
                                cpu_smt_control == CPU_SMT_ENABLED ?
                                "Enabling" : "Disabling");
                x86_spec_ctrl_base = mask;
                on_each_cpu(update_stibp_msr, NULL, 1);
        }
        mutex_unlock(&spec_ctrl_mutex);
}

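/*
 * Pick the Spectre v2 mitigation: enhanced IBRS when available on
 * auto/force, otherwise a retpoline flavour matching the CPU vendor and
 * compiler support, plus RSB filling and, where supported, IBPB and IBRS
 * around firmware calls.
 */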
static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *      - RSB underflow (and switch to BTB) on Skylake+
         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and if
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }

        /* Enable STIBP if appropriate */
        arch_smt_update();
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

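/* Parse the spec_store_bypass_disable= / nospec_store_bypass_disable options. */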
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

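/*
 * Map the command line selection to an SSB mitigation mode and, for the
 * unconditional "on" mode, enable SSBD in the appropriate MSR right away.
 */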
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

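/*
 * Per-task SSB control via prctl(): PR_SPEC_ENABLE clears TIF_SSBD,
 * PR_SPEC_DISABLE sets it, and PR_SPEC_FORCE_DISABLE additionally makes
 * the setting irrevocable for the task.
 */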
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        bool update;

        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        default:
                return -ERANGE;
        }

        /*
         * If being set on non-current task, delay setting the CPU
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
                speculation_ctrl_update_current();

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

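/* Report the effective SSB state of @task for the speculation control prctl. */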
static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

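/*
 * Bring a freshly booted AP in line with the boot CPU: replay the
 * SPEC_CTRL base value and, if SSB is globally disabled, the AMD SSBD
 * setting.
 */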
void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44 bits of physical address space internally in
 * the cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * which report 36 physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44 bits internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44 bits if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
        if (c->x86 != 6)
                return;

        switch (c->x86_model) {
        case INTEL_FAM6_NEHALEM:
        case INTEL_FAM6_WESTMERE:
        case INTEL_FAM6_SANDYBRIDGE:
        case INTEL_FAM6_IVYBRIDGE:
        case INTEL_FAM6_HASWELL_CORE:
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
        case INTEL_FAM6_BROADWELL_CORE:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_SKYLAKE_MOBILE:
        case INTEL_FAM6_SKYLAKE_DESKTOP:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (c->x86_cache_bits < 44)
                        c->x86_cache_bits = 44;
                break;
        }
}

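/*
 * Select the L1TF mitigation: disable SMT when the chosen mode requires
 * it, warn if PTE inversion cannot protect all of memory (no PAE, or more
 * than MAX_PA/2 RAM installed), and otherwise enable PTE inversion.
 */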
static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        override_cache_bits(&boot_cpu_data);

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
                        e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
                                half_pa);
                pr_info("However, doing so will make a part of your RAM unusable.\n");
                pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

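/* Map the l1tf= command line option onto an l1tf_mitigations mode. */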
static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char *l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]                = "auto",
        [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
        [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
                return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
             cpu_smt_control == CPU_SMT_ENABLED))
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);

        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
                       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
        return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static char *stibp_state(void)
{
        if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
                return "";

        if (x86_spec_ctrl_base & SPEC_CTRL_STIBP)
                return ", STIBP";
        else
                return "";
}

static char *ibpb_state(void)
{
        if (boot_cpu_has(X86_FEATURE_USE_IBPB))
                return ", IBPB";
        else
                return "";
}

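/*
 * Common backend for the sysfs vulnerabilities files: translate the current
 * mitigation state of @bug into a human readable string.
 */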
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_KAISER))
                        return sprintf(buf, "Mitigation: PTI\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
                        return l1tf_show_state(buf);
                break;
        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif