/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
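
/*
 * Compute the size of the XSAVE area needed to hold the features in
 * @xstate_bv.  Each extended state component is sized by querying its
 * CPUID.0xD sub-leaf: EAX gives the component's size and EBX its offset
 * in the non-compacted format; in the compacted format components are
 * instead packed back to back, so the running size is used as the offset.
 */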
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);
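
/*
 * XCR0 bits the guest may set: the host's XCR0 intersected with the
 * features KVM knows how to context-switch, minus the MPX state bits
 * when MPX is unsupported.
 */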
u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)
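
/*
 * Recompute the guest's dynamic CPUID bits (OSXSAVE, OSPKE, APIC, MWAIT,
 * PV_UNHALT) and the cached xstate sizes and physical-address width after
 * the CPUID table, or the guest state it mirrors, has changed.
 */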
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}
	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}
	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}
	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}
	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best) {
			if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
				best->ecx |= F(MWAIT);
			else
				best->ecx &= ~F(MWAIT);
		}
	}
	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}
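
/*
 * If the host runs with EFER.NX disabled, the guest cannot use NX either;
 * strip the NX bit from the guest's 0x80000001 leaf to match.
 */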
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
/*
 * Legacy KVM_SET_CPUID path: an old userspace passes the old
 * kvm_cpuid_entry layout to a new kernel, which converts it to
 * kvm_cpuid_entry2 here.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	if (cpuid->nent) {
		cpuid_entries =
			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
					   cpuid->nent));
		r = -ENOMEM;
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}
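
/* Drop feature bits that the host kernel itself does not expose. */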
static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}
static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
			  int index)
{
	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 2:
		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		break;
	/* Leaves whose sub-leaves are selected by ECX. */
	case 4: case 7: case 0xb: case 0xd: case 0xf:
	case 0x10: case 0x12: case 0x14: case 0x17:
	case 0x18: case 0x1f: case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}
}
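
/*
 * KVM_GET_EMULATED_CPUID: report only the features that KVM can emulate
 * in software even when the host CPU lacks them, e.g. MOVBE and RDPID.
 */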
static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
				    u32 func, int *nent, int maxnent)
{
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		entry->ecx = F(RDPID);
		++*nent;
	default:
		break;
	}

	return 0;
}
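
/* Mask a single CPUID.7 sub-leaf down to the features KVM can expose. */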
static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
{
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	unsigned f_la57;

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B);

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR);

	switch (index) {
	case 0:
		entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
		/* TSC_ADJUST is emulated */
		entry->ebx |= F(TSC_ADJUST);

		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
		f_la57 = entry->ecx & F(LA57);
		cpuid_mask(&entry->ecx, CPUID_7_ECX);
		/* Set LA57 based on hardware capability. */
		entry->ecx |= f_la57;
		entry->ecx |= f_umip;
		/* PKU is not yet implemented for shadow paging. */
		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);

		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_7_EDX);
		/*
		 * We emulate ARCH_CAPABILITIES in software even
		 * if the host doesn't support it.
		 */
		entry->edx |= F(ARCH_CAPABILITIES);
		break;
	default:
		WARN_ON_ONCE(1);
		entry->eax = 0;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
}
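
/*
 * Build the host-derived CPUID entries for @function (including any
 * sub-leaves), masked down to the feature set KVM supports and can
 * expose to guests.
 */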
static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
				  int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_host_cpuid(entry, function, 0);
	++*nent;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/*
	 * function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT.
	 */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[t], function, 0);
			++*nent;
		}
		break;
	}
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d: {
		int i, cache_type;

		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7: {
		int i;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		for (i = 0; i <= entry->eax; i++) {
			if (i) {
				if (*nent >= maxnent)
					goto out;

				do_host_cpuid(&entry[i], function, i);
				++*nent;
			}

			do_cpuid_7_mask(&entry[i], i);
		}
		break;
	}
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/*
	 * Per Intel's SDM, the 0x1f is a superset of 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb: {
		int i, level_type;

		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			++*nent;
			++i;
		}
		break;
	}
	/* Intel PT */
	case 0x14: {
		int t, times = entry->eax;

		if (!f_intel_pt)
			break;

		for (t = 1; t <= times; ++t) {
			if (*nent >= maxnent)
				goto out;
			do_host_cpuid(&entry[t], function, t);
			++*nent;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		/*
		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
		 * hardware cpuid
		 */
		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * The preference is to use SPEC CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}
	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}
static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
			 int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(entry, func, nent, maxnent);

	return __do_cpuid_func(entry, func, nent, maxnent);
}
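
/*
 * Describes one range of leaves to enumerate: the base function, whether
 * its leaf 0 reports the range's highest leaf in EAX, and an optional
 * qualifier that can veto the range (e.g. Centaur leaves on other CPUs).
 */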
struct kvm_cpuid_param {
	u32 func;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};
static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sad.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}

	return false;
}
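
/*
 * Top-level worker for KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID:
 * walk the param table, expand each range leaf by leaf, and copy the
 * resulting entries back to userspace.
 */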
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_func(&cpuid_entries[nent], ent->func,
				  &nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_func(&cpuid_entries[nent], func,
					  &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;

	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}
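
/*
 * CPUID leaf 2 on older Intel CPUs is stateful: each execution returns the
 * next batch of descriptors.  Rotate KVM_CPUID_FLAG_STATE_READ_NEXT to the
 * next stored entry with the same function so repeated guest CPUIDs cycle
 * through the saved values.
 */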
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}
/*
 * Find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;
	bool entry_found = true;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best) {
		entry_found = false;
		if (!check_limit)
			goto out;

		best = check_cpuid_limit(vcpu, function, index);
	}

out:
	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
	return entry_found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
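
/*
 * Handle a guest-executed CPUID instruction: EAX/ECX select the leaf,
 * the result is written back to the GPRs, and RIP is advanced past the
 * instruction.
 */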
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);