OSDN Git Service

kvm: selftests: add vmx_tsc_adjust_test
[uclinux-h8/linux.git] / tools / testing / selftests / kvm / vmx_tsc_adjust_test.c
1 /*
2  * gtests/tests/vmx_tsc_adjust_test.c
3  *
4  * Copyright (C) 2018, Google LLC.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2.
7  *
8  *
9  * IA32_TSC_ADJUST test
10  *
11  * According to the SDM, "if an execution of WRMSR to the
12  * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
13  * the logical processor also adds (or subtracts) value X from the
14  * IA32_TSC_ADJUST MSR."
15  *
16  * Note that when L1 doesn't intercept writes to IA32_TSC, a
17  * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
18  * value.
19  *
20  * This test verifies that this unusual case is handled correctly.
21  */
22
23 #include "test_util.h"
24 #include "kvm_util.h"
25 #include "x86.h"
26 #include "vmx.h"
27
28 #include <string.h>
29 #include <sys/ioctl.h>
30
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE       4096
#define VCPU_ID         5

/* Amount by which the guest moves IA32_TSC (and thus IA32_TSC_ADJUST) down. */
#define TSC_ADJUST_VALUE (1ll << 32)
/* TSC offset programmed into the L2 VMCS; negative, so L2 sees a smaller TSC. */
#define TSC_OFFSET_VALUE -(1ll << 48)

/* I/O ports the guest hits to signal the host; see exit_to_l0(). */
enum {
        PORT_ABORT = 0x1000,
        PORT_REPORT,
        PORT_DONE,
};

/* One guest page, tracked by both its guest-virtual and guest-physical address. */
struct vmx_page {
        vm_vaddr_t virt;
        vm_paddr_t phys;
};

/* Indices into the vmx_pages array. */
enum {
        VMXON_PAGE = 0,
        VMCS_PAGE,
        MSR_BITMAP_PAGE,

        NUM_VMX_PAGES,
};

/* A kvm_msrs header followed by exactly one MSR entry, with no padding. */
struct kvm_single_msr {
        struct kvm_msrs header;
        struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

/* Array of vmx_page descriptors that is shared with the guest. */
struct vmx_page *vmx_pages;
70
/* Signal the host via a port I/O vmexit; the argument travels in %rdi. */
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
        /*
         * The "in" causes a KVM_EXIT_IO in the host, which reads the
         * argument back out of the saved guest %rdi (see main()).  %al is
         * written by the port read, hence the "rax" clobber.
         */
        __asm__ __volatile__("in %[port], %%al"
                :
                : [port]"d"(port), "D"(arg)
                : "rax");
}


/* Abort the test from guest context, passing the condition text to the host. */
#define GUEST_ASSERT(_condition) do {                                        \
        if (!(_condition))                                                   \
                exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
} while (0)
85
86 static void check_ia32_tsc_adjust(int64_t max)
87 {
88         int64_t adjust;
89
90         adjust = rdmsr(MSR_IA32_TSC_ADJUST);
91         exit_to_l0(PORT_REPORT, adjust);
92         GUEST_ASSERT(adjust <= max);
93 }
94
static void l2_guest_code(void)
{
        /*
         * L2 runs with TSC_OFFSET_VALUE applied to its TSC reads, so
         * subtracting the offset from L2's rdtsc() recovers L1's TSC.
         */
        uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

        /*
         * L1 does not intercept this WRMSR (the MSR bitmap is all zeroes),
         * so per the SDM this write moves L1's TSC -- not L2's perceived
         * TSC -- back by TSC_ADJUST_VALUE.  Together with the write already
         * done in l1_guest_code(), IA32_TSC_ADJUST must now be at most
         * -2 * TSC_ADJUST_VALUE.
         */
        wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
        check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

        /* Exit to L1 */
        __asm__ __volatile__("vmcall");
}
105
/*
 * L1 guest body: winds the TSC back by TSC_ADJUST_VALUE, then launches L2
 * (which winds it back again from nested context) and checks that
 * IA32_TSC_ADJUST tracks both writes.  @vmx_pages is the guest-virtual
 * address of the shared page-descriptor array set up by the host.
 */
static void l1_guest_code(struct vmx_page *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        uint32_t control;
        uintptr_t save_cr3;

        /* The test's math assumes the TSC hasn't yet wrapped past 2^32. */
        GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
        wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
        check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

        prepare_for_vmx_operation();

        /* Enter VMX root operation. */
        *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
        GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));

        /* Load a VMCS. */
        *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
        GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
        GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));

        /* Prepare the VMCS for L2 execution. */
        prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
        /*
         * The zeroed MSR bitmap disables MSR intercepts, so L2's
         * WRMSR(IA32_TSC) reaches L1's TSC directly; the TSC offset makes
         * L2 perceive a different TSC than L1.
         */
        control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
        vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
        vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
        vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

        /* Jump into L2.  First, test failure to load guest CR3.  */
        save_cr3 = vmreadz(GUEST_CR3);
        vmwrite(GUEST_CR3, -1ull);
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
                     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
        /* A failed VM-entry must not have disturbed IA32_TSC_ADJUST. */
        check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
        vmwrite(GUEST_CR3, save_cr3);

        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

        /* L2 subtracted TSC_ADJUST_VALUE once more from L1's TSC. */
        check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

        exit_to_l0(PORT_DONE, 0);
}
152
153 static void allocate_vmx_page(struct vmx_page *page)
154 {
155         vm_vaddr_t virt;
156
157         virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
158         memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);
159
160         page->virt = virt;
161         page->phys = addr_gva2gpa(vm, virt);
162 }
163
164 static vm_vaddr_t allocate_vmx_pages(void)
165 {
166         vm_vaddr_t vmx_pages_vaddr;
167         int i;
168
169         vmx_pages_vaddr = vm_vaddr_alloc(
170                 vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);
171
172         vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);
173
174         for (i = 0; i < NUM_VMX_PAGES; i++)
175                 allocate_vmx_page(&vmx_pages[i]);
176
177         return vmx_pages_vaddr;
178 }
179
180 void report(int64_t val)
181 {
182         printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
183                val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
184 }
185
186 int main(int argc, char *argv[])
187 {
188         vm_vaddr_t vmx_pages_vaddr;
189         struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
190
191         if (!(entry->ecx & CPUID_VMX)) {
192                 printf("nested VMX not enabled, skipping test");
193                 return 0;
194         }
195
196         vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
197
198         /* Allocate VMX pages and shared descriptors (vmx_pages). */
199         vmx_pages_vaddr = allocate_vmx_pages();
200         vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);
201
202         for (;;) {
203                 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
204                 struct kvm_regs regs;
205
206                 vcpu_run(vm, VCPU_ID);
207                 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
208                             "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n",
209                             run->exit_reason,
210                             exit_reason_str(run->exit_reason));
211
212                 vcpu_regs_get(vm, VCPU_ID, &regs);
213
214                 switch (run->io.port) {
215                 case PORT_ABORT:
216                         TEST_ASSERT(false, "%s", (const char *) regs.rdi);
217                         /* NOT REACHED */
218                 case PORT_REPORT:
219                         report(regs.rdi);
220                         break;
221                 case PORT_DONE:
222                         goto done;
223                 default:
224                         TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
225                 }
226         }
227
228         kvm_vm_free(vm);
229 done:
230         return 0;
231 }