/*
 * gtests/tests/vmx_tsc_adjust_test.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */
#include "test_util.h"

#include <inttypes.h>
#include <sys/ioctl.h>
31 #ifndef MSR_IA32_TSC_ADJUST
32 #define MSR_IA32_TSC_ADJUST 0x3b
35 #define PAGE_SIZE 4096
38 #define TSC_ADJUST_VALUE (1ll << 32)
39 #define TSC_OFFSET_VALUE -(1ll << 48)
/*
 * A kvm_msrs header immediately followed by exactly one kvm_msr_entry —
 * presumably handed to a KVM MSR ioctl, where the entry serves as the
 * header's variable-length payload (TODO confirm against the caller).
 * Packed so no padding separates the header from the entry.
 */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));
/* The virtual machine object. */
static struct kvm_vm *vm;

/*
 * Array of vmx_page descriptors that is shared with the guest: the host
 * view is established via addr_gva2hva() in allocate_vmx_pages(), so
 * host and guest read the same descriptors.
 */
struct vmx_page *vmx_pages;
/*
 * Request service from L0 (the host): execute IN on @port with @arg in
 * %rdi, which the host observes as a KVM_EXIT_IO.  The wrapper macro
 * casts any pointer or integer argument to unsigned long.
 */
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
	__asm__ __volatile__("in %[port], %%al"
			     : /* no outputs */
			     : [port]"d"(port), "D"(arg));
}
/*
 * In-guest assertion: when @_condition is false, report the stringified
 * condition to the host via PORT_ABORT.  As previously written the macro
 * lacked the negated condition test and the do/while(0) terminator, so
 * it aborted unconditionally; both are restored here so the macro also
 * behaves as a single statement after if/else.
 */
#define GUEST_ASSERT(_condition) do {					     \
	if (!(_condition))						     \
		exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition); \
} while (0)
86 static void check_ia32_tsc_adjust(int64_t max)
90 adjust = rdmsr(MSR_IA32_TSC_ADJUST);
91 exit_to_l0(PORT_REPORT, adjust);
92 GUEST_ASSERT(adjust <= max);
95 static void l2_guest_code(void)
97 uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
99 wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
100 check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
103 __asm__ __volatile__("vmcall");
106 static void l1_guest_code(struct vmx_page *vmx_pages)
108 #define L2_GUEST_STACK_SIZE 64
109 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
113 GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
114 wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
115 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
117 prepare_for_vmx_operation();
119 /* Enter VMX root operation. */
120 *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
121 GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));
124 *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
125 GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
126 GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));
128 /* Prepare the VMCS for L2 execution. */
129 prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
130 control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
131 control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
132 vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
133 vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
134 vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
136 /* Jump into L2. First, test failure to load guest CR3. */
137 save_cr3 = vmreadz(GUEST_CR3);
138 vmwrite(GUEST_CR3, -1ull);
139 GUEST_ASSERT(!vmlaunch());
140 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
141 (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
142 check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
143 vmwrite(GUEST_CR3, save_cr3);
145 GUEST_ASSERT(!vmlaunch());
146 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
148 check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
150 exit_to_l0(PORT_DONE, 0);
153 static void allocate_vmx_page(struct vmx_page *page)
157 virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
158 memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);
161 page->phys = addr_gva2gpa(vm, virt);
164 static vm_vaddr_t allocate_vmx_pages(void)
166 vm_vaddr_t vmx_pages_vaddr;
169 vmx_pages_vaddr = vm_vaddr_alloc(
170 vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);
172 vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);
174 for (i = 0; i < NUM_VMX_PAGES; i++)
175 allocate_vmx_page(&vmx_pages[i]);
177 return vmx_pages_vaddr;
180 void report(int64_t val)
182 printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
183 val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
186 int main(int argc, char *argv[])
188 vm_vaddr_t vmx_pages_vaddr;
189 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
191 if (!(entry->ecx & CPUID_VMX)) {
192 printf("nested VMX not enabled, skipping test");
196 vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
198 /* Allocate VMX pages and shared descriptors (vmx_pages). */
199 vmx_pages_vaddr = allocate_vmx_pages();
200 vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);
203 volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
204 struct kvm_regs regs;
206 vcpu_run(vm, VCPU_ID);
207 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
208 "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n",
210 exit_reason_str(run->exit_reason));
212 vcpu_regs_get(vm, VCPU_ID, ®s);
214 switch (run->io.port) {
216 TEST_ASSERT(false, "%s", (const char *) regs.rdi);
224 TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);