tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c

// SPDX-License-Identifier: GPL-2.0
/*
 * XCR0 cpuid test
 *
 * Copyright (C) 2022, Google LLC.
 */

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"

/*
 * Assert that architectural dependency rules are satisfied, e.g. that AVX is
 * supported only if SSE is supported.
 */
#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies)     \
do {                                                                              \
        uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
                                                                                  \
        GUEST_ASSERT_3((__supported & (xfeatures)) != (xfeatures) ||              \
                       __supported == ((xfeatures) | (dependencies)),             \
                       __supported, (xfeatures), (dependencies));                 \
} while (0)
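
/*
 * E.g. for the AVX check below (xfeatures = YMM, dependencies = SSE): if all
 * of the YMM bits are supported, then the SSE bits must be supported as well;
 * a supported_xcr0 that enumerates YMM without SSE trips the assert.
 */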

/*
 * Assert that KVM reports a sane, usable as-is XCR0.  Architecturally, a CPU
 * isn't strictly required to _support_ all XFeatures related to a feature, but
 * at the same time XSETBV will #GP if bundled XFeatures aren't enabled and
 * disabled coherently.  E.g. a CPU can technically enumerate support for
 * XTILE_CFG but not XTILE_DATA, but attempting to enable XTILE_CFG without
 * XTILE_DATA will #GP.
 */
#define ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0, xfeatures)          \
do {                                                                    \
        uint64_t __supported = (supported_xcr0) & (xfeatures);          \
                                                                        \
        GUEST_ASSERT_2(!__supported || __supported == (xfeatures),      \
                       __supported, (xfeatures));                       \
} while (0)
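
/*
 * E.g. for the MPX check below, BNDREGS and BNDCSR must be enumerated either
 * both supported or both unsupported; __supported == 0 and
 * __supported == (xfeatures) are the only passing cases.
 */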

static void guest_code(void)
{
        uint64_t xcr0_reset;
        uint64_t supported_xcr0;
        int i, vector;

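        /*
         * XGETBV and XSETBV #UD unless CR4.OSXSAVE is enabled, so set
         * OSXSAVE before touching XCR0.
         */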
        set_cr4(get_cr4() | X86_CR4_OSXSAVE);

        xcr0_reset = xgetbv(0);
        supported_xcr0 = this_cpu_supported_xcr0();

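        /*
         * Architecturally, XCR0 resets to 1, i.e. only the legacy x87/FP
         * state bit is enabled out of reset.
         */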
        GUEST_ASSERT(xcr0_reset == XFEATURE_MASK_FP);

        /* Check AVX */
        ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
                                     XFEATURE_MASK_YMM,
                                     XFEATURE_MASK_SSE);

        /* Check MPX */
        ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
                                    XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        /* Check AVX-512 */
        ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0,
                                     XFEATURE_MASK_AVX512,
                                     XFEATURE_MASK_SSE | XFEATURE_MASK_YMM);
        ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
                                    XFEATURE_MASK_AVX512);

        /* Check AMX */
        ASSERT_ALL_OR_NONE_XFEATURE(supported_xcr0,
                                    XFEATURE_MASK_XTILE);

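        /* Enabling the full KVM-reported supported mask should succeed. */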
        vector = xsetbv_safe(0, supported_xcr0);
        GUEST_ASSERT_2(!vector, supported_xcr0, vector);

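        /*
         * Conversely, attempting to enable any bit KVM does not report as
         * supported should #GP.
         */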
        for (i = 0; i < 64; i++) {
                if (supported_xcr0 & BIT_ULL(i))
                        continue;

                vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
                GUEST_ASSERT_3(vector == GP_VECTOR, supported_xcr0, vector, BIT_ULL(i));
        }

        GUEST_DONE();
}

int main(int argc, char *argv[])
{
        struct kvm_vcpu *vcpu;
        struct kvm_run *run;
        struct kvm_vm *vm;
        struct ucall uc;

        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
        run = vcpu->run;

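        /*
         * The guest uses xsetbv_safe() to catch the expected #GPs, which
         * relies on the selftests' exception fixup handlers; those need the
         * descriptor tables below to be initialized.
         */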
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vcpu);

        while (1) {
                vcpu_run(vcpu);

                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "Unexpected exit reason: %u (%s)\n",
                            run->exit_reason,
                            exit_reason_str(run->exit_reason));

                switch (get_ucall(vcpu, &uc)) {
                case UCALL_ABORT:
                        REPORT_GUEST_ASSERT_3(uc, "0x%lx 0x%lx 0x%lx");
                        break;
                case UCALL_DONE:
                        goto done;
                default:
                        TEST_FAIL("Unknown ucall %lu", uc.cmd);
                }
        }

done:
        kvm_vm_free(vm);
        return 0;
}