arch/x86/kernel/cpu/bugs.c (sagit-ice-cold/kernel_xiaomi_msm8998.git, commit 7fd0a13ae0ba43e0d0f72ecfb5b4012e2d2a9ed3)
1 /*
2  *  Copyright (C) 1994  Linus Torvalds
3  *
4  *  Cyrix stuff, June 1998 by:
5  *      - Rafael R. Reilova (moved everything from head.S),
6  *        <rreilova@ececs.uc.edu>
7  *      - Channing Corn (tests & fixes),
8  *      - Andrew D. Balsa (code cleanup).
9  */
10 #include <linux/init.h>
11 #include <linux/utsname.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17
18 #include <asm/spec-ctrl.h>
19 #include <asm/cmdline.h>
20 #include <asm/bugs.h>
21 #include <asm/processor.h>
22 #include <asm/processor-flags.h>
23 #include <asm/fpu/internal.h>
24 #include <asm/msr.h>
25 #include <asm/paravirt.h>
26 #include <asm/alternative.h>
27 #include <asm/hypervisor.h>
28 #include <asm/pgtable.h>
29 #include <asm/cacheflush.h>
30 #include <asm/intel-family.h>
31 #include <asm/e820.h>
32
33 #include "cpu.h"
34
35 static void __init spectre_v1_select_mitigation(void);
36 static void __init spectre_v2_select_mitigation(void);
37 static void __init ssb_select_mitigation(void);
38 static void __init l1tf_select_mitigation(void);
39 static void __init mds_select_mitigation(void);
40 static void __init taa_select_mitigation(void);
41
42 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
43 u64 x86_spec_ctrl_base;
44 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
45 static DEFINE_MUTEX(spec_ctrl_mutex);
46
47 /*
48  * The vendor and possibly platform specific bits which can be modified in
49  * x86_spec_ctrl_base.
50  */
51 static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
52
53 /*
54  * AMD specific MSR info for Speculative Store Bypass control.
55  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
56  */
57 u64 x86_amd_ls_cfg_base;
58 u64 x86_amd_ls_cfg_ssbd_mask;
59
60 /* Control conditional STIBP in switch_to() */
61 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
62 /* Control conditional IBPB in switch_mm() */
63 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
64 /* Control unconditional IBPB in switch_mm() */
65 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
66
67 /* Control MDS CPU buffer clear before returning to user space */
68 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
69 /* Control MDS CPU buffer clear before idling (halt, mwait) */
70 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
71 EXPORT_SYMBOL_GPL(mds_idle_clear);
72
73 void __init check_bugs(void)
74 {
75         identify_boot_cpu();
76
77         if (!IS_ENABLED(CONFIG_SMP)) {
78                 pr_info("CPU: ");
79                 print_cpu_info(&boot_cpu_data);
80         }
81
82         /*
83          * Read the SPEC_CTRL MSR to account for reserved bits which may
84          * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
85          * init code as it is not enumerated and depends on the family.
86          */
87         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
88                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
89
90         /* Allow STIBP in MSR_SPEC_CTRL if supported */
91         if (boot_cpu_has(X86_FEATURE_STIBP))
92                 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
93
94         /* Select the proper CPU mitigations before patching alternatives: */
95         spectre_v1_select_mitigation();
96         spectre_v2_select_mitigation();
97         ssb_select_mitigation();
98         l1tf_select_mitigation();
99         mds_select_mitigation();
100         taa_select_mitigation();
101
102         arch_smt_update();
103
104 #ifdef CONFIG_X86_32
105         /*
106          * Check whether we are able to run this kernel safely on SMP.
107          *
108          * - i386 is no longer supported.
109          * - In order to run on anything without a TSC, we need to be
110          *   compiled for an i486.
111          */
112         if (boot_cpu_data.x86 < 4)
113                 panic("Kernel requires i486+ for 'invlpg' and other features");
114
115         init_utsname()->machine[1] =
116                 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
117         alternative_instructions();
118
119         fpu__init_check_bugs();
120 #else /* CONFIG_X86_64 */
121         alternative_instructions();
122
123         /*
124          * Make sure the first 2MB area is not mapped by huge pages.
125          * There are typically fixed-size MTRRs in there and overlapping
126          * MTRRs into large pages causes slowdowns.
127          *
128          * Right now we don't do that with gbpages because there seems
129          * very little benefit for that case.
130          */
131         if (!direct_gbpages)
132                 set_memory_4k((unsigned long)__va(0), 1);
133 #endif
134 }
135
136 void
137 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
138 {
139         u64 msrval, guestval, hostval = x86_spec_ctrl_base;
140         struct thread_info *ti = current_thread_info();
141
142         /* Is MSR_SPEC_CTRL implemented? */
143         if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
144                 /*
145                  * Restrict guest_spec_ctrl to supported values. Clear the
146                  * modifiable bits in the host base value and OR in the
147                  * modifiable bits from the guest value.
148                  */
149                 guestval = hostval & ~x86_spec_ctrl_mask;
150                 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
151
152                 /* SSBD controlled in MSR_SPEC_CTRL */
153                 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
154                     static_cpu_has(X86_FEATURE_AMD_SSBD))
155                         hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
156
157                 /* Conditional STIBP enabled? */
158                 if (static_branch_unlikely(&switch_to_cond_stibp))
159                         hostval |= stibp_tif_to_spec_ctrl(ti->flags);
160
161                 if (hostval != guestval) {
162                         msrval = setguest ? guestval : hostval;
163                         wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
164                 }
165         }
166
167         /*
168          * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
169          * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
170          */
171         if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
172             !static_cpu_has(X86_FEATURE_VIRT_SSBD))
173                 return;
174
175         /*
176          * If the host has SSBD mitigation enabled, force it in the host's
177          * virtual MSR value. If it's not permanently enabled, evaluate
178          * current's TIF_SSBD thread flag.
179          */
180         if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
181                 hostval = SPEC_CTRL_SSBD;
182         else
183                 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
184
185         /* Sanitize the guest value */
186         guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
187
188         if (hostval != guestval) {
189                 unsigned long tif;
190
191                 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
192                                  ssbd_spec_ctrl_to_tif(hostval);
193
194                 speculation_ctrl_update(tif);
195         }
196 }
197 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
198
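/*
 * Engage Speculative Store Bypass Disable via the AMD-specific interface:
 * MSR_AMD64_VIRT_SPEC_CTRL when the virtualized SSBD interface is available,
 * otherwise the SSBD bit in MSR_AMD64_LS_CFG.
 */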
199 static void x86_amd_ssb_disable(void)
200 {
201         u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
202
203         if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
204                 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
205         else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
206                 wrmsrl(MSR_AMD64_LS_CFG, msrval);
207 }
208
209 #undef pr_fmt
210 #define pr_fmt(fmt)     "MDS: " fmt
211
212 /* Default mitigation for MDS-affected CPUs */
213 static enum mds_mitigations mds_mitigation = MDS_MITIGATION_FULL;
214
215 static const char * const mds_strings[] = {
216         [MDS_MITIGATION_OFF]    = "Vulnerable",
217         [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
218         [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
219 };
220
221 static void __init mds_select_mitigation(void)
222 {
223         if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
224                 mds_mitigation = MDS_MITIGATION_OFF;
225                 return;
226         }
227
228         if (mds_mitigation == MDS_MITIGATION_FULL) {
229                 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
230                         mds_mitigation = MDS_MITIGATION_VMWERV;
231                 static_branch_enable(&mds_user_clear);
232         }
233         pr_info("%s\n", mds_strings[mds_mitigation]);
234 }
235
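/* Parse the "mds=" boot command line option. */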
236 static int __init mds_cmdline(char *str)
237 {
238         if (!boot_cpu_has_bug(X86_BUG_MDS))
239                 return 0;
240
241         if (!str)
242                 return -EINVAL;
243
244         if (!strcmp(str, "off"))
245                 mds_mitigation = MDS_MITIGATION_OFF;
246         else if (!strcmp(str, "full"))
247                 mds_mitigation = MDS_MITIGATION_FULL;
248
249         return 0;
250 }
251 early_param("mds", mds_cmdline);
252
253 #undef pr_fmt
254 #define pr_fmt(fmt)     "TAA: " fmt
255
256 /* Default mitigation for TAA-affected CPUs */
257 static enum taa_mitigations taa_mitigation = TAA_MITIGATION_VERW;
258
259 static const char * const taa_strings[] = {
260         [TAA_MITIGATION_OFF]            = "Vulnerable",
261         [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
262         [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
263         [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
264 };
265
266 static void __init taa_select_mitigation(void)
267 {
268         u64 ia32_cap;
269
270         if (!boot_cpu_has_bug(X86_BUG_TAA)) {
271                 taa_mitigation = TAA_MITIGATION_OFF;
272                 return;
273         }
274
275         /* TSX previously disabled by tsx=off */
276         if (!boot_cpu_has(X86_FEATURE_RTM)) {
277                 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
278                 goto out;
279         }
280
281         if (cpu_mitigations_off()) {
282                 taa_mitigation = TAA_MITIGATION_OFF;
283                 return;
284         }
285
286         /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
287         if (taa_mitigation == TAA_MITIGATION_OFF)
288                 goto out;
289
290         if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
291                 taa_mitigation = TAA_MITIGATION_VERW;
292         else
293                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
294
295         /*
296          * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
297          * A microcode update fixes this behavior to clear CPU buffers. It also
298          * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
299          * ARCH_CAP_TSX_CTRL_MSR bit.
300          *
301          * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
302          * update is required.
303          */
304         ia32_cap = x86_read_arch_cap_msr();
305         if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
306             !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
307                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
308
309         /*
310          * TSX is enabled, select alternate mitigation for TAA which is
311          * the same as MDS. Enable MDS static branch to clear CPU buffers.
312          *
313          * For guests that can't determine whether the correct microcode is
314          * present on the host, enable the mitigation for UCODE_NEEDED as well.
315          */
316         static_branch_enable(&mds_user_clear);
317
318 out:
319         pr_info("%s\n", taa_strings[taa_mitigation]);
320 }
321
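/* Parse the "tsx_async_abort=" boot command line option. */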
322 static int __init tsx_async_abort_parse_cmdline(char *str)
323 {
324         if (!boot_cpu_has_bug(X86_BUG_TAA))
325                 return 0;
326
327         if (!str)
328                 return -EINVAL;
329
330         if (!strcmp(str, "off")) {
331                 taa_mitigation = TAA_MITIGATION_OFF;
332         } else if (!strcmp(str, "full")) {
333                 taa_mitigation = TAA_MITIGATION_VERW;
334         }
335
336         return 0;
337 }
338 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
339
340 #undef pr_fmt
341 #define pr_fmt(fmt)     "Spectre V1 : " fmt
342
343 enum spectre_v1_mitigation {
344         SPECTRE_V1_MITIGATION_NONE,
345         SPECTRE_V1_MITIGATION_AUTO,
346 };
347
348 static enum spectre_v1_mitigation spectre_v1_mitigation =
349         SPECTRE_V1_MITIGATION_AUTO;
350
351 static const char * const spectre_v1_strings[] = {
352         [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
353         [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
354 };
355
356 /*
357  * Does SMAP provide full mitigation against speculative kernel access to
358  * userspace?
359  */
360 static bool smap_works_speculatively(void)
361 {
362         if (!boot_cpu_has(X86_FEATURE_SMAP))
363                 return false;
364
365         /*
366          * On CPUs which are vulnerable to Meltdown, SMAP does not
367          * prevent speculative access to user data in the L1 cache.
368          * Consider SMAP to be non-functional as a mitigation on these
369          * CPUs.
370          */
371         if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
372                 return false;
373
374         return true;
375 }
376
377 static void __init spectre_v1_select_mitigation(void)
378 {
379         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
380                 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
381                 return;
382         }
383
384         if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
385                 /*
386                  * With Spectre v1, a user can speculatively control either
387                  * path of a conditional swapgs with a user-controlled GS
388                  * value.  The mitigation is to add lfences to both code paths.
389                  *
390                  * If FSGSBASE is enabled, the user can put a kernel address in
391                  * GS, in which case SMAP provides no protection.
392                  *
393                  * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
394                  *         FSGSBASE enablement patches have been merged. ]
395                  *
396                  * If FSGSBASE is disabled, the user can only put a user space
397                  * address in GS.  That makes an attack harder, but still
398                  * possible if there's no SMAP protection.
399                  */
400                 if (!smap_works_speculatively()) {
401                         /*
402                          * Mitigation can be provided from SWAPGS itself or
403                          * PTI as the CR3 write in the Meltdown mitigation
404                          * is serializing.
405                          *
406                          * If neither is there, mitigate with an LFENCE to
407                          * stop speculation through swapgs.
408                          */
409                         if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
410                             !boot_cpu_has(X86_FEATURE_KAISER))
411                                 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
412
413                         /*
414                          * Enable lfences in the kernel entry (non-swapgs)
415                          * paths, to prevent user entry from speculatively
416                          * skipping swapgs.
417                          */
418                         setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
419                 }
420         }
421
422         pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
423 }
424
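/* "nospectre_v1" on the command line disables the Spectre V1 mitigation. */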
425 static int __init nospectre_v1_cmdline(char *str)
426 {
427         spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
428         return 0;
429 }
430 early_param("nospectre_v1", nospectre_v1_cmdline);
431
432 #undef pr_fmt
433 #define pr_fmt(fmt)     "Spectre V2 : " fmt
434
435 static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
436
437 static enum spectre_v2_user_mitigation spectre_v2_user = SPECTRE_V2_USER_NONE;
438
439 #ifdef RETPOLINE
440 static bool spectre_v2_bad_module;
441
442 bool retpoline_module_ok(bool has_retpoline)
443 {
444         if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
445                 return true;
446
447         pr_err("System may be vulnerable to spectre v2\n");
448         spectre_v2_bad_module = true;
449         return false;
450 }
451
452 static inline const char *spectre_v2_module_string(void)
453 {
454         return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
455 }
456 #else
457 static inline const char *spectre_v2_module_string(void) { return ""; }
458 #endif
459
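/* Compare a command line argument of length arglen against an option string. */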
460 static inline bool match_option(const char *arg, int arglen, const char *opt)
461 {
462         int len = strlen(opt);
463
464         return len == arglen && !strncmp(arg, opt, len);
465 }
466
467 /* The kernel command line selection for spectre v2 */
468 enum spectre_v2_mitigation_cmd {
469         SPECTRE_V2_CMD_NONE,
470         SPECTRE_V2_CMD_AUTO,
471         SPECTRE_V2_CMD_FORCE,
472         SPECTRE_V2_CMD_RETPOLINE,
473         SPECTRE_V2_CMD_RETPOLINE_GENERIC,
474         SPECTRE_V2_CMD_RETPOLINE_AMD,
475 };
476
477 enum spectre_v2_user_cmd {
478         SPECTRE_V2_USER_CMD_NONE,
479         SPECTRE_V2_USER_CMD_AUTO,
480         SPECTRE_V2_USER_CMD_FORCE,
481         SPECTRE_V2_USER_CMD_PRCTL,
482         SPECTRE_V2_USER_CMD_PRCTL_IBPB,
483         SPECTRE_V2_USER_CMD_SECCOMP,
484         SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
485 };
486
487 static const char * const spectre_v2_user_strings[] = {
488         [SPECTRE_V2_USER_NONE]          = "User space: Vulnerable",
489         [SPECTRE_V2_USER_STRICT]        = "User space: Mitigation: STIBP protection",
490         [SPECTRE_V2_USER_PRCTL]         = "User space: Mitigation: STIBP via prctl",
491         [SPECTRE_V2_USER_SECCOMP]       = "User space: Mitigation: STIBP via seccomp and prctl",
492 };
493
494 static const struct {
495         const char                      *option;
496         enum spectre_v2_user_cmd        cmd;
497         bool                            secure;
498 } v2_user_options[] __initconst = {
499         { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
500         { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
501         { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
502         { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
503         { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
504         { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
505         { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
506 };
507
508 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
509 {
510         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
511                 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
512 }
513
514 static enum spectre_v2_user_cmd __init
515 spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
516 {
517         char arg[20];
518         int ret, i;
519
520         switch (v2_cmd) {
521         case SPECTRE_V2_CMD_NONE:
522                 return SPECTRE_V2_USER_CMD_NONE;
523         case SPECTRE_V2_CMD_FORCE:
524                 return SPECTRE_V2_USER_CMD_FORCE;
525         default:
526                 break;
527         }
528
529         ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
530                                   arg, sizeof(arg));
531         if (ret < 0)
532                 return SPECTRE_V2_USER_CMD_AUTO;
533
534         for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
535                 if (match_option(arg, ret, v2_user_options[i].option)) {
536                         spec_v2_user_print_cond(v2_user_options[i].option,
537                                                 v2_user_options[i].secure);
538                         return v2_user_options[i].cmd;
539                 }
540         }
541
542         pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
543         return SPECTRE_V2_USER_CMD_AUTO;
544 }
545
546 static void __init
547 spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
548 {
549         enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
550         bool smt_possible = IS_ENABLED(CONFIG_SMP);
551         enum spectre_v2_user_cmd cmd;
552
553         if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
554                 return;
555
556         if (!IS_ENABLED(CONFIG_SMP))
557                 smt_possible = false;
558
559         cmd = spectre_v2_parse_user_cmdline(v2_cmd);
560         switch (cmd) {
561         case SPECTRE_V2_USER_CMD_NONE:
562                 goto set_mode;
563         case SPECTRE_V2_USER_CMD_FORCE:
564                 mode = SPECTRE_V2_USER_STRICT;
565                 break;
566         case SPECTRE_V2_USER_CMD_PRCTL:
567         case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
568                 mode = SPECTRE_V2_USER_PRCTL;
569                 break;
570         case SPECTRE_V2_USER_CMD_AUTO:
571         case SPECTRE_V2_USER_CMD_SECCOMP:
572         case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
573                 if (IS_ENABLED(CONFIG_SECCOMP))
574                         mode = SPECTRE_V2_USER_SECCOMP;
575                 else
576                         mode = SPECTRE_V2_USER_PRCTL;
577                 break;
578         }
579
580         /* Initialize Indirect Branch Prediction Barrier */
581         if (boot_cpu_has(X86_FEATURE_IBPB)) {
582                 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
583
584                 switch (cmd) {
585                 case SPECTRE_V2_USER_CMD_FORCE:
586                 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
587                 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
588                         static_branch_enable(&switch_mm_always_ibpb);
589                         break;
590                 case SPECTRE_V2_USER_CMD_PRCTL:
591                 case SPECTRE_V2_USER_CMD_AUTO:
592                 case SPECTRE_V2_USER_CMD_SECCOMP:
593                         static_branch_enable(&switch_mm_cond_ibpb);
594                         break;
595                 default:
596                         break;
597                 }
598
599                 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
600                         static_key_enabled(&switch_mm_always_ibpb) ?
601                         "always-on" : "conditional");
602         }
603
604         /* If enhanced IBRS is enabled, no STIBP is required */
605         if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
606                 return;
607
608         /*
609          * If SMT is not possible or STIBP is not available, clear the STIBP
610          * mode.
611          */
612         if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
613                 mode = SPECTRE_V2_USER_NONE;
614 set_mode:
615         spectre_v2_user = mode;
616         /* Only print the STIBP mode when SMT is possible */
617         if (smt_possible)
618                 pr_info("%s\n", spectre_v2_user_strings[mode]);
619 }
620
621 static const char * const spectre_v2_strings[] = {
622         [SPECTRE_V2_NONE]                       = "Vulnerable",
623         [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
624         [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
625         [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
626         [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
627         [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
628 };
629
630 static const struct {
631         const char *option;
632         enum spectre_v2_mitigation_cmd cmd;
633         bool secure;
634 } mitigation_options[] __initconst = {
635         { "off",                SPECTRE_V2_CMD_NONE,              false },
636         { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
637         { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
638         { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
639         { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
640         { "auto",               SPECTRE_V2_CMD_AUTO,              false },
641 };
642
643 static void __init spec_v2_print_cond(const char *reason, bool secure)
644 {
645         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
646                 pr_info("%s selected on command line.\n", reason);
647 }
648
649 static inline bool retp_compiler(void)
650 {
651         return __is_defined(RETPOLINE);
652 }
653
654 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
655 {
656         enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
657         char arg[20];
658         int ret, i;
659
660         if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
661             cpu_mitigations_off())
662                 return SPECTRE_V2_CMD_NONE;
663
664         ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
665         if (ret < 0)
666                 return SPECTRE_V2_CMD_AUTO;
667
668         for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
669                 if (!match_option(arg, ret, mitigation_options[i].option))
670                         continue;
671                 cmd = mitigation_options[i].cmd;
672                 break;
673         }
674
675         if (i >= ARRAY_SIZE(mitigation_options)) {
676                 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
677                 return SPECTRE_V2_CMD_AUTO;
678         }
679
680         if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
681              cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
682              cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
683             !IS_ENABLED(CONFIG_RETPOLINE)) {
684                 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
685                 return SPECTRE_V2_CMD_AUTO;
686         }
687
688         if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
689             boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
690                 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
691                 return SPECTRE_V2_CMD_AUTO;
692         }
693
694         spec_v2_print_cond(mitigation_options[i].option,
695                            mitigation_options[i].secure);
696         return cmd;
697 }
698
699 static void __init spectre_v2_select_mitigation(void)
700 {
701         enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
702         enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
703
704         /*
705          * If the CPU is not affected and the command line mode is NONE or AUTO
706          * then there is nothing to do.
707          */
708         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
709             (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
710                 return;
711
712         switch (cmd) {
713         case SPECTRE_V2_CMD_NONE:
714                 return;
715
716         case SPECTRE_V2_CMD_FORCE:
717         case SPECTRE_V2_CMD_AUTO:
718                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
719                         mode = SPECTRE_V2_IBRS_ENHANCED;
720                         /* Force it so VMEXIT will restore correctly */
721                         x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
722                         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
723                         goto specv2_set_mode;
724                 }
725                 if (IS_ENABLED(CONFIG_RETPOLINE))
726                         goto retpoline_auto;
727                 break;
728         case SPECTRE_V2_CMD_RETPOLINE_AMD:
729                 if (IS_ENABLED(CONFIG_RETPOLINE))
730                         goto retpoline_amd;
731                 break;
732         case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
733                 if (IS_ENABLED(CONFIG_RETPOLINE))
734                         goto retpoline_generic;
735                 break;
736         case SPECTRE_V2_CMD_RETPOLINE:
737                 if (IS_ENABLED(CONFIG_RETPOLINE))
738                         goto retpoline_auto;
739                 break;
740         }
741         pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
742         return;
743
744 retpoline_auto:
745         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
746         retpoline_amd:
747                 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
748                         pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
749                         goto retpoline_generic;
750                 }
751                 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
752                                          SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
753                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
754                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
755         } else {
756         retpoline_generic:
757                 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
758                                          SPECTRE_V2_RETPOLINE_MINIMAL;
759                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
760         }
761
762 specv2_set_mode:
763         spectre_v2_enabled = mode;
764         pr_info("%s\n", spectre_v2_strings[mode]);
765
766         /*
767          * If spectre v2 protection has been enabled, unconditionally fill
768          * RSB during a context switch; this protects against two independent
769          * issues:
770          *
771          *      - RSB underflow (and switch to BTB) on Skylake+
772          *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
773          */
774         setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
775         pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
776
777         /*
778          * Retpoline means the kernel is safe because it has no indirect
779          * branches. Enhanced IBRS protects firmware too, so, enable restricted
780          * speculation around firmware calls only when Enhanced IBRS isn't
781          * supported.
782          *
783          * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
784          * the user might select retpoline on the kernel command line and if
785          * the CPU supports Enhanced IBRS, the kernel might unintentionally not
786          * enable IBRS around firmware calls.
787          */
788         if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
789                 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
790                 pr_info("Enabling Restricted Speculation for firmware calls\n");
791         }
792
793         /* Set up IBPB and STIBP depending on the general spectre V2 command */
794         spectre_v2_user_select_mitigation(cmd);
795 }
796
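/* Per-CPU callback used by update_stibp_strict() to write the new SPEC_CTRL base value. */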
797 static void update_stibp_msr(void * __unused)
798 {
799         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
800 }
801
802 /* Update x86_spec_ctrl_base in case SMT state changed. */
803 static void update_stibp_strict(void)
804 {
805         u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
806
807         if (sched_smt_active())
808                 mask |= SPEC_CTRL_STIBP;
809
810         if (mask == x86_spec_ctrl_base)
811                 return;
812
813         pr_info("Update user space SMT mitigation: STIBP %s\n",
814                 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
815         x86_spec_ctrl_base = mask;
816         on_each_cpu(update_stibp_msr, NULL, 1);
817 }
818
819 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
820 static void update_indir_branch_cond(void)
821 {
822         if (sched_smt_active())
823                 static_branch_enable(&switch_to_cond_stibp);
824         else
825                 static_branch_disable(&switch_to_cond_stibp);
826 }
827
828 #undef pr_fmt
829 #define pr_fmt(fmt) fmt
830
831 /* Update the static key controlling the MDS CPU buffer clear in idle */
832 static void update_mds_branch_idle(void)
833 {
834         /*
835          * Enable the idle clearing if SMT is active on CPUs which are
836          * affected only by MSBDS and not any other MDS variant.
837          *
838          * The other variants cannot be mitigated when SMT is enabled, so
839          * clearing the buffers on idle just to prevent the Store Buffer
840          * repartitioning leak would be a window dressing exercise.
841          */
842         if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
843                 return;
844
845         if (sched_smt_active())
846                 static_branch_enable(&mds_idle_clear);
847         else
848                 static_branch_disable(&mds_idle_clear);
849 }
850
851 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
852 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
853
854 void arch_smt_update(void)
855 {
856         mutex_lock(&spec_ctrl_mutex);
857
858         switch (spectre_v2_user) {
859         case SPECTRE_V2_USER_NONE:
860                 break;
861         case SPECTRE_V2_USER_STRICT:
862                 update_stibp_strict();
863                 break;
864         case SPECTRE_V2_USER_PRCTL:
865         case SPECTRE_V2_USER_SECCOMP:
866                 update_indir_branch_cond();
867                 break;
868         }
869
870         switch (mds_mitigation) {
871         case MDS_MITIGATION_FULL:
872         case MDS_MITIGATION_VMWERV:
873                 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
874                         pr_warn_once(MDS_MSG_SMT);
875                 update_mds_branch_idle();
876                 break;
877         case MDS_MITIGATION_OFF:
878                 break;
879         }
880
881         switch (taa_mitigation) {
882         case TAA_MITIGATION_VERW:
883         case TAA_MITIGATION_UCODE_NEEDED:
884                 if (sched_smt_active())
885                         pr_warn_once(TAA_MSG_SMT);
886                 break;
887         case TAA_MITIGATION_TSX_DISABLED:
888         case TAA_MITIGATION_OFF:
889                 break;
890         }
891
892         mutex_unlock(&spec_ctrl_mutex);
893 }
894
895 #undef pr_fmt
896 #define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
897
898 static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
899
900 /* The kernel command line selection */
901 enum ssb_mitigation_cmd {
902         SPEC_STORE_BYPASS_CMD_NONE,
903         SPEC_STORE_BYPASS_CMD_AUTO,
904         SPEC_STORE_BYPASS_CMD_ON,
905         SPEC_STORE_BYPASS_CMD_PRCTL,
906         SPEC_STORE_BYPASS_CMD_SECCOMP,
907 };
908
909 static const char * const ssb_strings[] = {
910         [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
911         [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
912         [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
913         [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
914 };
915
916 static const struct {
917         const char *option;
918         enum ssb_mitigation_cmd cmd;
919 } ssb_mitigation_options[]  __initconst = {
920         { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
921         { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
922         { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
923         { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
924         { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
925 };
926
927 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
928 {
929         enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
930         char arg[20];
931         int ret, i;
932
933         if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
934             cpu_mitigations_off()) {
935                 return SPEC_STORE_BYPASS_CMD_NONE;
936         } else {
937                 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
938                                           arg, sizeof(arg));
939                 if (ret < 0)
940                         return SPEC_STORE_BYPASS_CMD_AUTO;
941
942                 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
943                         if (!match_option(arg, ret, ssb_mitigation_options[i].option))
944                                 continue;
945
946                         cmd = ssb_mitigation_options[i].cmd;
947                         break;
948                 }
949
950                 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
951                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
952                         return SPEC_STORE_BYPASS_CMD_AUTO;
953                 }
954         }
955
956         return cmd;
957 }
958
959 static enum ssb_mitigation __init __ssb_select_mitigation(void)
960 {
961         enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
962         enum ssb_mitigation_cmd cmd;
963
964         if (!boot_cpu_has(X86_FEATURE_SSBD))
965                 return mode;
966
967         cmd = ssb_parse_cmdline();
968         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
969             (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
970              cmd == SPEC_STORE_BYPASS_CMD_AUTO))
971                 return mode;
972
973         switch (cmd) {
974         case SPEC_STORE_BYPASS_CMD_AUTO:
975         case SPEC_STORE_BYPASS_CMD_SECCOMP:
976                 /*
977                  * Choose prctl+seccomp as the default mode if seccomp is
978                  * enabled.
979                  */
980                 if (IS_ENABLED(CONFIG_SECCOMP))
981                         mode = SPEC_STORE_BYPASS_SECCOMP;
982                 else
983                         mode = SPEC_STORE_BYPASS_PRCTL;
984                 break;
985         case SPEC_STORE_BYPASS_CMD_ON:
986                 mode = SPEC_STORE_BYPASS_DISABLE;
987                 break;
988         case SPEC_STORE_BYPASS_CMD_PRCTL:
989                 mode = SPEC_STORE_BYPASS_PRCTL;
990                 break;
991         case SPEC_STORE_BYPASS_CMD_NONE:
992                 break;
993         }
994
995         /*
996          * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
997          * bit in the mask to allow guests to use the mitigation even in the
998          * case where the host does not enable it.
999          */
1000         if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
1001             static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1002                 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1003         }
1004
1005         /*
1006          * We have three CPU feature flags that are in play here:
1007          *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1008          *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1009          *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1010          */
1011         if (mode == SPEC_STORE_BYPASS_DISABLE) {
1012                 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1013                 /*
1014                  * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1015                  * use a completely different MSR and bit dependent on family.
1016                  */
1017                 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1018                     !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1019                         x86_amd_ssb_disable();
1020                 } else {
1021                         x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1022                         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1023                 }
1024         }
1025
1026         return mode;
1027 }
1028
1029 static void ssb_select_mitigation(void)
1030 {
1031         ssb_mode = __ssb_select_mitigation();
1032
1033         if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1034                 pr_info("%s\n", ssb_strings[ssb_mode]);
1035 }
1036
1037 #undef pr_fmt
1038 #define pr_fmt(fmt)     "Speculation prctl: " fmt
1039
1040 static void task_update_spec_tif(struct task_struct *tsk)
1041 {
1042         /* Force the update of the real TIF bits */
1043         set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1044
1045         /*
1046          * Immediately update the speculation control MSRs for the current
1047          * task, but for a non-current task delay setting the CPU
1048          * mitigation until it is scheduled next.
1049          *
1050          * This can only happen for SECCOMP mitigation. For PRCTL it's
1051          * always the current task.
1052          */
1053         if (tsk == current)
1054                 speculation_ctrl_update_current();
1055 }
1056
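/* Handle PR_SPEC_STORE_BYPASS requests from prctl() and seccomp. */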
1057 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1058 {
1059         if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1060             ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1061                 return -ENXIO;
1062
1063         switch (ctrl) {
1064         case PR_SPEC_ENABLE:
1065                 /* If speculation is force disabled, enable is not allowed */
1066                 if (task_spec_ssb_force_disable(task))
1067                         return -EPERM;
1068                 task_clear_spec_ssb_disable(task);
1069                 task_update_spec_tif(task);
1070                 break;
1071         case PR_SPEC_DISABLE:
1072                 task_set_spec_ssb_disable(task);
1073                 task_update_spec_tif(task);
1074                 break;
1075         case PR_SPEC_FORCE_DISABLE:
1076                 task_set_spec_ssb_disable(task);
1077                 task_set_spec_ssb_force_disable(task);
1078                 task_update_spec_tif(task);
1079                 break;
1080         default:
1081                 return -ERANGE;
1082         }
1083         return 0;
1084 }
1085
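/* Handle PR_SPEC_INDIRECT_BRANCH requests from prctl() and seccomp. */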
1086 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1087 {
1088         switch (ctrl) {
1089         case PR_SPEC_ENABLE:
1090                 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
1091                         return 0;
1092                 /*
1093                  * Indirect branch speculation is always disabled in strict
1094                  * mode.
1095                  */
1096                 if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
1097                         return -EPERM;
1098                 task_clear_spec_ib_disable(task);
1099                 task_update_spec_tif(task);
1100                 break;
1101         case PR_SPEC_DISABLE:
1102         case PR_SPEC_FORCE_DISABLE:
1103                 /*
1104                  * Indirect branch speculation is always allowed when
1105                  * mitigation is force disabled.
1106                  */
1107                 if (spectre_v2_user == SPECTRE_V2_USER_NONE)
1108                         return -EPERM;
1109                 if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
1110                         return 0;
1111                 task_set_spec_ib_disable(task);
1112                 if (ctrl == PR_SPEC_FORCE_DISABLE)
1113                         task_set_spec_ib_force_disable(task);
1114                 task_update_spec_tif(task);
1115                 break;
1116         default:
1117                 return -ERANGE;
1118         }
1119         return 0;
1120 }
1121
1122 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1123                              unsigned long ctrl)
1124 {
1125         switch (which) {
1126         case PR_SPEC_STORE_BYPASS:
1127                 return ssb_prctl_set(task, ctrl);
1128         case PR_SPEC_INDIRECT_BRANCH:
1129                 return ib_prctl_set(task, ctrl);
1130         default:
1131                 return -ENODEV;
1132         }
1133 }
1134
1135 #ifdef CONFIG_SECCOMP
1136 void arch_seccomp_spec_mitigate(struct task_struct *task)
1137 {
1138         if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1139                 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1140         if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
1141                 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1142 }
1143 #endif
1144
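/* Report the Speculative Store Bypass state for prctl(PR_GET_SPECULATION_CTRL). */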
1145 static int ssb_prctl_get(struct task_struct *task)
1146 {
1147         switch (ssb_mode) {
1148         case SPEC_STORE_BYPASS_DISABLE:
1149                 return PR_SPEC_DISABLE;
1150         case SPEC_STORE_BYPASS_SECCOMP:
1151         case SPEC_STORE_BYPASS_PRCTL:
1152                 if (task_spec_ssb_force_disable(task))
1153                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1154                 if (task_spec_ssb_disable(task))
1155                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1156                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1157         default:
1158                 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1159                         return PR_SPEC_ENABLE;
1160                 return PR_SPEC_NOT_AFFECTED;
1161         }
1162 }
1163
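/* Report the indirect branch speculation state for prctl(PR_GET_SPECULATION_CTRL). */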
1164 static int ib_prctl_get(struct task_struct *task)
1165 {
1166         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1167                 return PR_SPEC_NOT_AFFECTED;
1168
1169         switch (spectre_v2_user) {
1170         case SPECTRE_V2_USER_NONE:
1171                 return PR_SPEC_ENABLE;
1172         case SPECTRE_V2_USER_PRCTL:
1173         case SPECTRE_V2_USER_SECCOMP:
1174                 if (task_spec_ib_force_disable(task))
1175                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1176                 if (task_spec_ib_disable(task))
1177                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1178                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1179         case SPECTRE_V2_USER_STRICT:
1180                 return PR_SPEC_DISABLE;
1181         default:
1182                 return PR_SPEC_NOT_AFFECTED;
1183         }
1184 }
1185
1186 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1187 {
1188         switch (which) {
1189         case PR_SPEC_STORE_BYPASS:
1190                 return ssb_prctl_get(task);
1191         case PR_SPEC_INDIRECT_BRANCH:
1192                 return ib_prctl_get(task);
1193         default:
1194                 return -ENODEV;
1195         }
1196 }
1197
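/* Apply the boot CPU's speculation control settings when a secondary CPU comes up. */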
1198 void x86_spec_ctrl_setup_ap(void)
1199 {
1200         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1201                 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1202
1203         if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1204                 x86_amd_ssb_disable();
1205 }
1206
1207 #undef pr_fmt
1208 #define pr_fmt(fmt)     "L1TF: " fmt
1209
1210 /*
1211  * These CPUs all support a 44-bit physical address space internally in the
1212  * cache, but CPUID can report a smaller number of physical address bits.
1213  *
1214  * The L1TF mitigation uses the topmost address bit for the inversion of
1215  * non-present PTEs. When the installed memory reaches into the topmost
1216  * address bit due to memory holes, which has been observed on machines
1217  * that report 36 physical address bits and have 32G RAM installed,
1218  * the mitigation range check in l1tf_select_mitigation() triggers.
1219  * This is a false positive because the mitigation is still possible due to
1220  * the fact that the cache uses 44 bits internally. Use the cache bits
1221  * instead of the reported physical bits and adjust them on the affected
1222  * machines to 44 bits if the reported bits are less than 44.
1223  */
1224 static void override_cache_bits(struct cpuinfo_x86 *c)
1225 {
1226         if (c->x86 != 6)
1227                 return;
1228
1229         switch (c->x86_model) {
1230         case INTEL_FAM6_NEHALEM:
1231         case INTEL_FAM6_WESTMERE:
1232         case INTEL_FAM6_SANDYBRIDGE:
1233         case INTEL_FAM6_IVYBRIDGE:
1234         case INTEL_FAM6_HASWELL_CORE:
1235         case INTEL_FAM6_HASWELL_ULT:
1236         case INTEL_FAM6_HASWELL_GT3E:
1237         case INTEL_FAM6_BROADWELL_CORE:
1238         case INTEL_FAM6_BROADWELL_GT3E:
1239         case INTEL_FAM6_SKYLAKE_MOBILE:
1240         case INTEL_FAM6_SKYLAKE_DESKTOP:
1241         case INTEL_FAM6_KABYLAKE_MOBILE:
1242         case INTEL_FAM6_KABYLAKE_DESKTOP:
1243                 if (c->x86_cache_bits < 44)
1244                         c->x86_cache_bits = 44;
1245                 break;
1246         }
1247 }
1248
1249 static void __init l1tf_select_mitigation(void)
1250 {
1251         u64 half_pa;
1252
1253         if (!boot_cpu_has_bug(X86_BUG_L1TF))
1254                 return;
1255
1256         override_cache_bits(&boot_cpu_data);
1257
1258 #if CONFIG_PGTABLE_LEVELS == 2
1259         pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1260         return;
1261 #endif
1262
1263         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1264         if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
1265                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1266                 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1267                                 half_pa);
1268                 pr_info("However, doing so will make a part of your RAM unusable.\n");
1269                 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1270                 return;
1271         }
1272
1273         setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1274 }
1275 #undef pr_fmt
1276 #define pr_fmt(fmt) fmt
1277
1278 #ifdef CONFIG_SYSFS
1279
1280 static ssize_t itlb_multihit_show_state(char *buf)
1281 {
1282         return sprintf(buf, "Processor vulnerable\n");
1283 }
1284
1285 static ssize_t mds_show_state(char *buf)
1286 {
1287 #ifdef CONFIG_HYPERVISOR_GUEST
1288         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1289                 return sprintf(buf, "%s; SMT Host state unknown\n",
1290                                mds_strings[mds_mitigation]);
1291         }
1292 #endif
1293
1294         if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
1295                 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1296                                (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
1297                                 sched_smt_active() ? "mitigated" : "disabled"));
1298         }
1299
1300         return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1301                        sched_smt_active() ? "vulnerable" : "disabled");
1302 }
1303
1304 static ssize_t tsx_async_abort_show_state(char *buf)
1305 {
1306         if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
1307             (taa_mitigation == TAA_MITIGATION_OFF))
1308                 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
1309
1310         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1311                 return sprintf(buf, "%s; SMT Host state unknown\n",
1312                                taa_strings[taa_mitigation]);
1313         }
1314
1315         return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
1316                        sched_smt_active() ? "vulnerable" : "disabled");
1317 }
1318
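/* Build the STIBP part of the spectre_v2 sysfs reporting string. */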
1319 static char *stibp_state(void)
1320 {
1321         if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1322                 return "";
1323
1324         switch (spectre_v2_user) {
1325         case SPECTRE_V2_USER_NONE:
1326                 return ", STIBP: disabled";
1327         case SPECTRE_V2_USER_STRICT:
1328                 return ", STIBP: forced";
1329         case SPECTRE_V2_USER_PRCTL:
1330         case SPECTRE_V2_USER_SECCOMP:
1331                 if (static_key_enabled(&switch_to_cond_stibp))
1332                         return ", STIBP: conditional";
1333         }
1334         return "";
1335 }
1336
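/* Build the IBPB part of the spectre_v2 sysfs reporting string. */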
1337 static char *ibpb_state(void)
1338 {
1339         if (boot_cpu_has(X86_FEATURE_IBPB)) {
1340                 if (static_key_enabled(&switch_mm_always_ibpb))
1341                         return ", IBPB: always-on";
1342                 if (static_key_enabled(&switch_mm_cond_ibpb))
1343                         return ", IBPB: conditional";
1344                 return ", IBPB: disabled";
1345         }
1346         return "";
1347 }
1348
1349 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1350                                char *buf, unsigned int bug)
1351 {
1352         if (!boot_cpu_has_bug(bug))
1353                 return sprintf(buf, "Not affected\n");
1354
1355         switch (bug) {
1356         case X86_BUG_CPU_MELTDOWN:
1357                 if (boot_cpu_has(X86_FEATURE_KAISER))
1358                         return sprintf(buf, "Mitigation: PTI\n");
1359
1360                 break;
1361
1362         case X86_BUG_SPECTRE_V1:
1363                 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1364
1365         case X86_BUG_SPECTRE_V2:
1366                 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1367                                ibpb_state(),
1368                                boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1369                                stibp_state(),
1370                                boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
1371                                spectre_v2_module_string());
1372
1373         case X86_BUG_SPEC_STORE_BYPASS:
1374                 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1375
1376         case X86_BUG_L1TF:
1377                 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
1378                         return sprintf(buf, "Mitigation: PTE Inversion\n");
1379                 break;
1380
1381         case X86_BUG_MDS:
1382                 return mds_show_state(buf);
1383
1384         case X86_BUG_TAA:
1385                 return tsx_async_abort_show_state(buf);
1386
1387         case X86_BUG_ITLB_MULTIHIT:
1388                 return itlb_multihit_show_state(buf);
1389
1390         default:
1391                 break;
1392         }
1393
1394         return sprintf(buf, "Vulnerable\n");
1395 }
1396
1397 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1398 {
1399         return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1400 }
1401
1402 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1403 {
1404         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1405 }
1406
1407 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1408 {
1409         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1410 }
1411
1412 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1413 {
1414         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1415 }
1416
1417 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
1418 {
1419         return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
1420 }
1421
1422 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
1423 {
1424         return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
1425 }
1426
1427 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
1428 {
1429         return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
1430 }
1431
1432 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
1433 {
1434         return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
1435 }
1436 #endif