// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V features enablement
 */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
11 #include "test_util.h"
13 #include "processor.h"
/*
 * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
 * but to activate the feature it is sufficient to set it to a non-zero
 * value. Use BIT(0) for that.
 */
#define HV_PV_SPINLOCKS_TEST \
	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
37 static bool is_write_only_msr(uint32_t msr)
39 return msr == HV_X64_MSR_EOI;
/*
 * Guest code: write the MSR described by @msr and, unless it is a
 * write-only MSR, read it back, asserting that a #GP fault occurs
 * exactly when @msr->fault_expected says it should.
 *
 * NOTE(review): this extract is missing lines (the declarations of
 * 'vector'/'msr_val', else branches, closing braces, and the final
 * GUEST_DONE) — the numeric prefixes fused into each line indicate a
 * garbled line-numbered paste; code is kept byte-identical here.
 */
42 static void guest_msr(struct msr_data *msr)
/* A zero MSR index would mean the host never filled in the stage data. */
47 GUEST_ASSERT(msr->idx);
50 vector = wrmsr_safe(msr->idx, msr->write_val);
/* Read back unless this stage is a write test of a write-only MSR. */
52 if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
53 vector = rdmsr_safe(msr->idx, &msr_val);
/* The access must #GP exactly when the stage expects a fault. */
55 if (msr->fault_expected)
56 GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
58 GUEST_ASSERT_3(!vector, msr->idx, vector, 0);
/* Nothing to compare if the access faulted or the MSR can't be read. */
60 if (vector || is_write_only_msr(msr->idx))
/* The value read back must match what was just written. */
64 GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
65 msr_val, msr->write_val);
67 /* Invariant TSC bit appears when TSC invariant control MSR is written to */
68 if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
69 if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
70 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
72 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
73 !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
/*
 * Guest code: establish the Hyper-V hypercall interface and issue the
 * hypercall described by @hcall, asserting either a #UD fault or the
 * expected hypercall status in 'res'.
 *
 * NOTE(review): this extract is missing lines (the 'vector' declaration,
 * the assignment of 'input', else branches, closing braces and the final
 * GUEST_DONE) — code is kept byte-identical, numeric prefixes included.
 */
80 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
82 u64 res, input, output;
/* A zero control value would mean the stage data was never filled in. */
85 GUEST_ASSERT(hcall->control);
/* The guest OS ID MSR must be written before the hypercall page MSR. */
87 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
88 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
/* Non-fast hypercalls pass input/output via GPA-addressed pages. */
90 if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
92 output = pgs_gpa + 4096;
97 vector = __hyperv_hypercall(hcall->control, input, output, &res);
/* Either the hypercall #UDs, or it completes with the expected status. */
98 if (hcall->ud_expected) {
99 GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
101 GUEST_ASSERT_2(!vector, hcall->control, vector);
102 GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
108 static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
111 * Enable all supported Hyper-V features, then clear the leafs holding
112 * the features that will be tested one by one.
114 vcpu_set_hv_cpuid(vcpu);
116 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
117 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
118 vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
/*
 * Host side: drive guest_msr() through a sequence of stages, toggling
 * guest-visible Hyper-V CPUID feature bits to verify that each synthetic
 * MSR faults if and only if its feature bit is clear (enforced via
 * KVM_CAP_HYPERV_ENFORCE_CPUID below).
 *
 * NOTE(review): this extract is missing lines — the per-stage loop and
 * switch/case scaffolding, 'msr->write' / 'msr->write_val' assignments
 * for many stages, braces, and cleanup. Code lines are kept byte-
 * identical, fused numeric prefixes included.
 */
121 static void guest_test_msrs_access(void)
123 struct kvm_cpuid2 *prev_cpuid = NULL;
124 struct kvm_vcpu *vcpu;
129 struct msr_data *msr;
130 bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);
/* Create the VM and a zeroed guest page used to pass stage data to guest_msr(). */
133 vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
135 msr_gva = vm_vaddr_alloc_page(vm);
136 memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
137 msr = addr_gva2hva(vm, msr_gva);
139 vcpu_args_set(vcpu, 1, msr_gva);
/* Have KVM honor the guest-visible Hyper-V CPUID bits when handling MSRs. */
140 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
143 vcpu_reset_hv_cpuid(vcpu);
/* Keep a copy of the vCPU's CPUID; refreshed via the memcpy() below. */
145 prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
147 vcpu_init_cpuid(vcpu, prev_cpuid);
150 vm_init_descriptor_tables(vm);
151 vcpu_init_descriptor_tables(vcpu);
153 /* TODO: Make this entire test easier to maintain. */
155 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
/* GUEST_OS_ID / HYPERCALL: */
160 * Only available when Hyper-V identification is set
162 msr->idx = HV_X64_MSR_GUEST_OS_ID;
164 msr->fault_expected = true;
167 msr->idx = HV_X64_MSR_HYPERCALL;
169 msr->fault_expected = true;
172 vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
174 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
175 * HV_X64_MSR_HYPERCALL available.
177 msr->idx = HV_X64_MSR_GUEST_OS_ID;
179 msr->write_val = HYPERV_LINUX_OS_ID;
180 msr->fault_expected = false;
183 msr->idx = HV_X64_MSR_GUEST_OS_ID;
185 msr->fault_expected = false;
188 msr->idx = HV_X64_MSR_HYPERCALL;
190 msr->fault_expected = false;
/* VP_RUNTIME: gated by HV_MSR_VP_RUNTIME_AVAILABLE. */
194 msr->idx = HV_X64_MSR_VP_RUNTIME;
196 msr->fault_expected = true;
199 vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
200 msr->idx = HV_X64_MSR_VP_RUNTIME;
202 msr->fault_expected = false;
/* NOTE(review): fault expected even with the bit set — looks like a
 * write-to-read-only-MSR stage; confirm against the full source. */
206 msr->idx = HV_X64_MSR_VP_RUNTIME;
209 msr->fault_expected = true;
/* TIME_REF_COUNT: gated by HV_MSR_TIME_REF_COUNT_AVAILABLE. */
213 msr->idx = HV_X64_MSR_TIME_REF_COUNT;
215 msr->fault_expected = true;
218 vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
219 msr->idx = HV_X64_MSR_TIME_REF_COUNT;
221 msr->fault_expected = false;
225 msr->idx = HV_X64_MSR_TIME_REF_COUNT;
228 msr->fault_expected = true;
/* VP_INDEX: gated by HV_MSR_VP_INDEX_AVAILABLE. */
232 msr->idx = HV_X64_MSR_VP_INDEX;
234 msr->fault_expected = true;
237 vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
238 msr->idx = HV_X64_MSR_VP_INDEX;
240 msr->fault_expected = false;
244 msr->idx = HV_X64_MSR_VP_INDEX;
247 msr->fault_expected = true;
/* RESET: gated by HV_MSR_RESET_AVAILABLE. */
251 msr->idx = HV_X64_MSR_RESET;
253 msr->fault_expected = true;
256 vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
257 msr->idx = HV_X64_MSR_RESET;
259 msr->fault_expected = false;
262 msr->idx = HV_X64_MSR_RESET;
265 * TODO: the test only writes '0' to HV_X64_MSR_RESET
266 * at the moment, writing some other value there will
267 * trigger real vCPU reset and the code is not prepared
271 msr->fault_expected = false;
/* REFERENCE_TSC: gated by HV_MSR_REFERENCE_TSC_AVAILABLE. */
275 msr->idx = HV_X64_MSR_REFERENCE_TSC;
277 msr->fault_expected = true;
280 vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
281 msr->idx = HV_X64_MSR_REFERENCE_TSC;
283 msr->fault_expected = false;
286 msr->idx = HV_X64_MSR_REFERENCE_TSC;
289 msr->fault_expected = false;
/* SynIC (EOM): gated by HV_MSR_SYNIC_AVAILABLE. */
293 msr->idx = HV_X64_MSR_EOM;
295 msr->fault_expected = true;
299 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
300 * capability enabled and guest visible CPUID bit unset.
302 msr->idx = HV_X64_MSR_EOM;
304 msr->fault_expected = true;
307 vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
308 msr->idx = HV_X64_MSR_EOM;
310 msr->fault_expected = false;
313 msr->idx = HV_X64_MSR_EOM;
316 msr->fault_expected = false;
/* Synthetic timers (STIMER0): gated by HV_MSR_SYNTIMER_AVAILABLE. */
320 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
322 msr->fault_expected = true;
325 vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
326 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
328 msr->fault_expected = false;
331 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
334 msr->fault_expected = false;
337 /* Direct mode test */
338 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
/* Bit 12 is the direct-mode bit; faults until the extra feature bit is set. */
340 msr->write_val = 1 << 12;
341 msr->fault_expected = true;
344 vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
345 msr->idx = HV_X64_MSR_STIMER0_CONFIG;
347 msr->write_val = 1 << 12;
348 msr->fault_expected = false;
/* APIC access MSR (EOI, write-only): gated by HV_MSR_APIC_ACCESS_AVAILABLE. */
352 msr->idx = HV_X64_MSR_EOI;
354 msr->fault_expected = true;
357 vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
358 msr->idx = HV_X64_MSR_EOI;
361 msr->fault_expected = false;
/* Frequency MSRs: gated by HV_ACCESS_FREQUENCY_MSRS. */
365 msr->idx = HV_X64_MSR_TSC_FREQUENCY;
367 msr->fault_expected = true;
370 vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
371 msr->idx = HV_X64_MSR_TSC_FREQUENCY;
373 msr->fault_expected = false;
377 msr->idx = HV_X64_MSR_TSC_FREQUENCY;
380 msr->fault_expected = true;
/* Reenlightenment: gated by HV_ACCESS_REENLIGHTENMENT. */
384 msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
386 msr->fault_expected = true;
389 vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
390 msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
392 msr->fault_expected = false;
395 msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
398 msr->fault_expected = false;
401 /* Can only write '0' */
402 msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
405 msr->fault_expected = true;
/* Crash MSRs: gated by HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE. */
409 msr->idx = HV_X64_MSR_CRASH_P0;
411 msr->fault_expected = true;
414 vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
415 msr->idx = HV_X64_MSR_CRASH_P0;
417 msr->fault_expected = false;
420 msr->idx = HV_X64_MSR_CRASH_P0;
423 msr->fault_expected = false;
/* Synthetic debugger MSRs: require both debug-MSR and syndbg feature bits. */
427 msr->idx = HV_X64_MSR_SYNDBG_STATUS;
429 msr->fault_expected = true;
432 vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
433 vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
434 msr->idx = HV_X64_MSR_SYNDBG_STATUS;
436 msr->fault_expected = false;
439 msr->idx = HV_X64_MSR_SYNDBG_STATUS;
442 msr->fault_expected = false;
/* Invariant TSC control (only reached when the host CPU has INVTSC). */
446 /* MSR is not available when CPUID feature bit is unset */
449 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
451 msr->fault_expected = true;
454 /* MSR is available when CPUID feature bit is set */
457 vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
458 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
460 msr->fault_expected = false;
463 /* Writing bits other than 0 is forbidden */
466 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
468 msr->write_val = 0xdeadbeef;
469 msr->fault_expected = true;
472 /* Setting bit 0 enables the feature */
475 msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
478 msr->fault_expected = false;
/* Re-apply the (possibly modified) CPUID and snapshot it for the next stage. */
486 vcpu_set_cpuid(vcpu);
488 memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
490 pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
491 msr->idx, msr->write ? "write" : "read");
/* The guest communicates back via ucall, which exits with port I/O. */
494 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
496 switch (get_ucall(vcpu, &uc)) {
498 REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
503 TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
/*
 * Host side: drive guest_hcall() through a sequence of stages, toggling
 * Hyper-V CPUID feature/recommendation bits to verify each hypercall is
 * denied (or #UDs) until its bit is set, and then returns the expected
 * status (KVM_CAP_HYPERV_ENFORCE_CPUID is enabled below).
 *
 * NOTE(review): this extract is missing lines — the per-stage loop and
 * switch/case scaffolding, braces, and cleanup. Code lines are kept
 * byte-identical, fused numeric prefixes included.
 */
512 static void guest_test_hcalls_access(void)
514 struct kvm_cpuid2 *prev_cpuid = NULL;
515 struct kvm_vcpu *vcpu;
519 vm_vaddr_t hcall_page, hcall_params;
520 struct hcall_data *hcall;
523 vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
525 vm_init_descriptor_tables(vm);
526 vcpu_init_descriptor_tables(vcpu);
528 /* Hypercall input/output */
529 hcall_page = vm_vaddr_alloc_pages(vm, 2);
530 memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
/* Zeroed guest page used to pass stage data to guest_hcall(). */
532 hcall_params = vm_vaddr_alloc_page(vm);
533 memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
534 hcall = addr_gva2hva(vm, hcall_params);
536 vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
/* Have KVM honor the guest-visible Hyper-V CPUID bits for hypercalls. */
537 vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
540 vcpu_reset_hv_cpuid(vcpu);
/* Keep a copy of the vCPU's CPUID; refreshed via the memcpy() below. */
542 prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
544 vcpu_init_cpuid(vcpu, prev_cpuid);
/* Unknown hypercall code -> INVALID_HYPERCALL_CODE. */
549 vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
550 hcall->control = 0xbeef;
551 hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
/* POST_MESSAGE: gated by HV_POST_MESSAGES. */
555 hcall->control = HVCALL_POST_MESSAGE;
556 hcall->expect = HV_STATUS_ACCESS_DENIED;
559 vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
560 hcall->control = HVCALL_POST_MESSAGE;
561 hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
/* SIGNAL_EVENT: gated by HV_SIGNAL_EVENTS. */
565 hcall->control = HVCALL_SIGNAL_EVENT;
566 hcall->expect = HV_STATUS_ACCESS_DENIED;
569 vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
570 hcall->control = HVCALL_SIGNAL_EVENT;
571 hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
/* RESET_DEBUG_SESSION: needs both the syndbg capability and HV_DEBUGGING. */
575 hcall->control = HVCALL_RESET_DEBUG_SESSION;
576 hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
579 vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
580 hcall->control = HVCALL_RESET_DEBUG_SESSION;
581 hcall->expect = HV_STATUS_ACCESS_DENIED;
584 vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
585 hcall->control = HVCALL_RESET_DEBUG_SESSION;
586 hcall->expect = HV_STATUS_OPERATION_DENIED;
/* TLB flush hypercalls: gated by the remote-TLB-flush recommendation. */
590 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
591 hcall->expect = HV_STATUS_ACCESS_DENIED;
594 vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
595 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
596 hcall->expect = HV_STATUS_SUCCESS;
/* _EX variants additionally need the ex-processor-masks recommendation. */
599 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
600 hcall->expect = HV_STATUS_ACCESS_DENIED;
603 vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
604 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
605 hcall->expect = HV_STATUS_SUCCESS;
/* IPI hypercalls: gated by the cluster-IPI recommendation. */
609 hcall->control = HVCALL_SEND_IPI;
610 hcall->expect = HV_STATUS_ACCESS_DENIED;
613 vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
614 hcall->control = HVCALL_SEND_IPI;
615 hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
618 /* Nothing in 'sparse banks' -> success */
619 hcall->control = HVCALL_SEND_IPI_EX;
620 hcall->expect = HV_STATUS_SUCCESS;
/* NOTIFY_LONG_SPIN_WAIT: gated by the PV-spinlocks enlightenment bit. */
624 hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
625 hcall->expect = HV_STATUS_ACCESS_DENIED;
628 vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
629 hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
630 hcall->expect = HV_STATUS_SUCCESS;
633 /* XMM fast hypercall */
634 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
/* Fast (XMM input) form #UDs until XMM input is advertised. */
635 hcall->ud_expected = true;
638 vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
639 hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
640 hcall->ud_expected = false;
641 hcall->expect = HV_STATUS_SUCCESS;
/* Extended hypercalls: gated by HV_ENABLE_EXTENDED_HYPERCALLS. */
644 hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
645 hcall->expect = HV_STATUS_ACCESS_DENIED;
648 vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
649 hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
650 hcall->expect = HV_STATUS_INVALID_PARAMETER;
/* Re-apply the (possibly modified) CPUID and snapshot it for the next stage. */
657 vcpu_set_cpuid(vcpu);
659 memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
661 pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);
/* The guest communicates back via ucall, which exits with port I/O. */
664 TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
666 switch (get_ucall(vcpu, &uc)) {
668 REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
673 TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
/*
 * NOTE(review): appears to be the tail of main() (its opening line is not
 * visible in this extract) — runs both test suites in sequence.
 */
684 pr_info("Testing access to Hyper-V specific MSRs\n");
685 guest_test_msrs_access();
687 pr_info("Testing access to Hyper-V hypercalls\n");
688 guest_test_hcalls_access();