tools/testing/selftests/kvm/x86_64/hyperv_features.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V features enablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <stdint.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"

/*
 * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf, but to
 * activate the PV spinlocks enlightenment it is sufficient to set it to a
 * non-zero value. Use BIT(0) for that.
 */
#define HV_PV_SPINLOCKS_TEST            \
        KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)

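/*
 * Describes a single MSR access for the guest to attempt: which MSR, whether
 * to read or write (and the value to write), and whether the access is
 * expected to raise #GP.
 */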
struct msr_data {
        uint32_t idx;
        bool fault_expected;
        bool write;
        uint64_t write_val;
};

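/*
 * Describes a single hypercall for the guest to issue: the control value,
 * the Hyper-V status code expected in the hypercall result, and whether the
 * hypercall instruction itself is expected to raise #UD.
 */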
struct hcall_data {
        uint64_t control;
        uint64_t expect;
        bool ud_expected;
};

static bool is_write_only_msr(uint32_t msr)
{
        return msr == HV_X64_MSR_EOI;
}

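/*
 * Guest side of the MSR test: perform the requested write and/or read with
 * the *_safe() accessors (which return the exception vector, 0 on success),
 * assert that #GP is raised exactly when msr->fault_expected says so, and,
 * for readable MSRs, read back and verify the written value.
 */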
static void guest_msr(struct msr_data *msr)
{
        uint8_t vector = 0;
        uint64_t msr_val = 0;

        GUEST_ASSERT(msr->idx);

        if (msr->write)
                vector = wrmsr_safe(msr->idx, msr->write_val);

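        /* Read the MSR back unless the write faulted or the MSR is write-only. */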
        if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
                vector = rdmsr_safe(msr->idx, &msr_val);

        if (msr->fault_expected)
                GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
        else
                GUEST_ASSERT_3(!vector, msr->idx, vector, 0);

        if (vector || is_write_only_msr(msr->idx))
                goto done;

        if (msr->write)
                GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
                               msr_val, msr->write_val);

        /* Invariant TSC bit appears when TSC invariant control MSR is written to */
        if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
                if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
                        GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
                else
                        GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
                                     !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
        }

done:
        GUEST_DONE();
}

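/*
 * Guest side of the hypercall test: set up the hypercall page via
 * HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL, issue hcall->control and
 * assert either the expected #UD or the expected Hyper-V status code.
 */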
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
        uint64_t res, input, output;
        uint8_t vector;

        GUEST_ASSERT(hcall->control);

        wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
        wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);

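        /*
         * Non-fast hypercalls pass input/output via guest physical addresses
         * (here the two pages of the pre-allocated hypercall area); 'fast'
         * hypercalls pass their arguments in registers instead, so no GPAs
         * are needed.
         */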
        if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
                input = pgs_gpa;
                output = pgs_gpa + 4096;
        } else {
                input = output = 0;
        }

        vector = __hyperv_hypercall(hcall->control, input, output, &res);
        if (hcall->ud_expected) {
                GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
        } else {
                GUEST_ASSERT_2(!vector, hcall->control, vector);
                GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
        }

        GUEST_DONE();
}

static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
        /*
         * Enable all supported Hyper-V features, then clear the leaves holding
         * the features that will be tested one by one.
         */
        vcpu_set_hv_cpuid(vcpu);

        vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
        vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
        vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}

static void guest_test_msrs_access(void)
{
        struct kvm_cpuid2 *prev_cpuid = NULL;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0;
        vm_vaddr_t msr_gva;
        struct msr_data *msr;
        bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);

        while (true) {
                vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

                msr_gva = vm_vaddr_alloc_page(vm);
                memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
                msr = addr_gva2hva(vm, msr_gva);

                vcpu_args_set(vcpu, 1, msr_gva);
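                /*
                 * With KVM_CAP_HYPERV_ENFORCE_CPUID, KVM honors the Hyper-V
                 * CPUID bits exposed to the guest instead of allowing all
                 * supported features unconditionally, which is what makes the
                 * per-stage fault expectations below meaningful.
                 */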
                vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

                if (!prev_cpuid) {
                        vcpu_reset_hv_cpuid(vcpu);

                        prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
                } else {
                        vcpu_init_cpuid(vcpu, prev_cpuid);
                }

                vm_init_descriptor_tables(vm);
                vcpu_init_descriptor_tables(vcpu);

                /* TODO: Make this entire test easier to maintain. */
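                /*
                 * The SynIC MSR stages below need the SynIC activated via
                 * KVM_CAP_HYPERV_SYNIC2; stage 21 checks that the capability
                 * alone, without the guest-visible CPUID bit, is not enough.
                 */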
                if (stage >= 21)
                        vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

                switch (stage) {
                case 0:
                        /*
                         * Only available when Hyper-V identification is set
                         */
                        msr->idx = HV_X64_MSR_GUEST_OS_ID;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 1:
                        msr->idx = HV_X64_MSR_HYPERCALL;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 2:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
                        /*
                         * HV_X64_MSR_GUEST_OS_ID has to be written first to make
                         * HV_X64_MSR_HYPERCALL available.
                         */
                        msr->idx = HV_X64_MSR_GUEST_OS_ID;
                        msr->write = true;
                        msr->write_val = HYPERV_LINUX_OS_ID;
                        msr->fault_expected = false;
                        break;
                case 3:
                        msr->idx = HV_X64_MSR_GUEST_OS_ID;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 4:
                        msr->idx = HV_X64_MSR_HYPERCALL;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;

                case 5:
                        msr->idx = HV_X64_MSR_VP_RUNTIME;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 6:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
                        msr->idx = HV_X64_MSR_VP_RUNTIME;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 7:
                        /* Read only */
                        msr->idx = HV_X64_MSR_VP_RUNTIME;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = true;
                        break;

                case 8:
                        msr->idx = HV_X64_MSR_TIME_REF_COUNT;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 9:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
                        msr->idx = HV_X64_MSR_TIME_REF_COUNT;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 10:
                        /* Read only */
                        msr->idx = HV_X64_MSR_TIME_REF_COUNT;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = true;
                        break;

                case 11:
                        msr->idx = HV_X64_MSR_VP_INDEX;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 12:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
                        msr->idx = HV_X64_MSR_VP_INDEX;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 13:
                        /* Read only */
                        msr->idx = HV_X64_MSR_VP_INDEX;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = true;
                        break;

                case 14:
                        msr->idx = HV_X64_MSR_RESET;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 15:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
                        msr->idx = HV_X64_MSR_RESET;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 16:
                        msr->idx = HV_X64_MSR_RESET;
                        msr->write = true;
                        /*
                         * TODO: the test only writes '0' to HV_X64_MSR_RESET
                         * at the moment; writing any other value would trigger
                         * a real vCPU reset, which the code is not yet prepared
                         * to handle.
                         */
                        msr->write_val = 0;
                        msr->fault_expected = false;
                        break;

                case 17:
                        msr->idx = HV_X64_MSR_REFERENCE_TSC;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 18:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
                        msr->idx = HV_X64_MSR_REFERENCE_TSC;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 19:
                        msr->idx = HV_X64_MSR_REFERENCE_TSC;
                        msr->write = true;
                        msr->write_val = 0;
                        msr->fault_expected = false;
                        break;

                case 20:
                        msr->idx = HV_X64_MSR_EOM;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 21:
                        /*
                         * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
                         * capability enabled and guest visible CPUID bit unset.
                         */
                        msr->idx = HV_X64_MSR_EOM;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 22:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
                        msr->idx = HV_X64_MSR_EOM;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 23:
                        msr->idx = HV_X64_MSR_EOM;
                        msr->write = true;
                        msr->write_val = 0;
                        msr->fault_expected = false;
                        break;

                case 24:
                        msr->idx = HV_X64_MSR_STIMER0_CONFIG;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 25:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
                        msr->idx = HV_X64_MSR_STIMER0_CONFIG;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 26:
                        msr->idx = HV_X64_MSR_STIMER0_CONFIG;
                        msr->write = true;
                        msr->write_val = 0;
                        msr->fault_expected = false;
                        break;
                case 27:
                        /* Direct mode test */
                        msr->idx = HV_X64_MSR_STIMER0_CONFIG;
                        msr->write = true;
                        msr->write_val = 1 << 12;
                        msr->fault_expected = true;
                        break;
                case 28:
                        vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
                        msr->idx = HV_X64_MSR_STIMER0_CONFIG;
                        msr->write = true;
                        msr->write_val = 1 << 12;
                        msr->fault_expected = false;
                        break;

                case 29:
                        msr->idx = HV_X64_MSR_EOI;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 30:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
                        msr->idx = HV_X64_MSR_EOI;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = false;
                        break;

                case 31:
                        msr->idx = HV_X64_MSR_TSC_FREQUENCY;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 32:
                        vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
                        msr->idx = HV_X64_MSR_TSC_FREQUENCY;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 33:
                        /* Read only */
                        msr->idx = HV_X64_MSR_TSC_FREQUENCY;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = true;
                        break;

                case 34:
                        msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 35:
                        vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
                        msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 36:
                        msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = false;
                        break;
                case 37:
                        /* Can only write '0' */
                        msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = true;
                        break;

                case 38:
                        msr->idx = HV_X64_MSR_CRASH_P0;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 39:
                        vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
                        msr->idx = HV_X64_MSR_CRASH_P0;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 40:
                        msr->idx = HV_X64_MSR_CRASH_P0;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = false;
                        break;

                case 41:
                        msr->idx = HV_X64_MSR_SYNDBG_STATUS;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 42:
                        vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
                        vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
                        msr->idx = HV_X64_MSR_SYNDBG_STATUS;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 43:
                        msr->idx = HV_X64_MSR_SYNDBG_STATUS;
                        msr->write = true;
                        msr->write_val = 0;
                        msr->fault_expected = false;
                        break;

                case 44:
                        /* MSR is not available when CPUID feature bit is unset */
                        if (!has_invtsc)
                                continue;
                        msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
                        msr->write = false;
                        msr->fault_expected = true;
                        break;
                case 45:
                        /* MSR is available when CPUID feature bit is set */
                        if (!has_invtsc)
                                continue;
                        vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
                        msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
                        msr->write = false;
                        msr->fault_expected = false;
                        break;
                case 46:
                        /* Writing bits other than 0 is forbidden */
                        if (!has_invtsc)
                                continue;
                        msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
                        msr->write = true;
                        msr->write_val = 0xdeadbeef;
                        msr->fault_expected = true;
                        break;
                case 47:
                        /* Setting bit 0 enables the feature */
                        if (!has_invtsc)
                                continue;
                        msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
                        msr->write = true;
                        msr->write_val = 1;
                        msr->fault_expected = false;
                        break;

                default:
                        kvm_vm_free(vm);
                        return;
                }

                vcpu_set_cpuid(vcpu);

                memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

                pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
                         msr->idx, msr->write ? "write" : "read");

                vcpu_run(vcpu);
                TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

                switch (get_ucall(vcpu, &uc)) {
                case UCALL_ABORT:
                        REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
                        return;
                case UCALL_DONE:
                        break;
                default:
                        TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
                        return;
                }

                stage++;
                kvm_vm_free(vm);
        }
}

static void guest_test_hcalls_access(void)
{
        struct kvm_cpuid2 *prev_cpuid = NULL;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0;
        vm_vaddr_t hcall_page, hcall_params;
        struct hcall_data *hcall;

        while (true) {
                vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

                vm_init_descriptor_tables(vm);
                vcpu_init_descriptor_tables(vcpu);

                /* Hypercall input/output */
                hcall_page = vm_vaddr_alloc_pages(vm, 2);
                memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

                hcall_params = vm_vaddr_alloc_page(vm);
                memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
                hcall = addr_gva2hva(vm, hcall_params);

                vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
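                /* Honor guest-visible Hyper-V CPUID, see guest_test_msrs_access() */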
                vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

                if (!prev_cpuid) {
                        vcpu_reset_hv_cpuid(vcpu);

                        prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
                } else {
                        vcpu_init_cpuid(vcpu, prev_cpuid);
                }

                switch (stage) {
                case 0:
                        vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
                        hcall->control = 0xbeef;
                        hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;

                case 1:
                        hcall->control = HVCALL_POST_MESSAGE;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 2:
                        vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
                        hcall->control = HVCALL_POST_MESSAGE;
                        hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;

                case 3:
                        hcall->control = HVCALL_SIGNAL_EVENT;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 4:
                        vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
                        hcall->control = HVCALL_SIGNAL_EVENT;
                        hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;

                case 5:
                        hcall->control = HVCALL_RESET_DEBUG_SESSION;
                        hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                case 6:
                        vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
                        hcall->control = HVCALL_RESET_DEBUG_SESSION;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 7:
                        vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
                        hcall->control = HVCALL_RESET_DEBUG_SESSION;
                        hcall->expect = HV_STATUS_OPERATION_DENIED;
                        break;

                case 8:
                        hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 9:
                        vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
                        hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;
                case 10:
                        hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 11:
                        vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
                        hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;

                case 12:
                        hcall->control = HVCALL_SEND_IPI;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 13:
                        vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
                        hcall->control = HVCALL_SEND_IPI;
                        hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                case 14:
                        /* Nothing in 'sparse banks' -> success */
                        hcall->control = HVCALL_SEND_IPI_EX;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;

                case 15:
                        hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 16:
                        vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
                        hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;
                case 17:
                        /* XMM fast hypercall */
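                        /*
                         * The 'fast' TLB flush passes its input in XMM
                         * registers; without HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE
                         * it is expected to raise #UD (stage 18 is the
                         * positive case).
                         */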
                        hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
                        hcall->ud_expected = true;
                        break;
                case 18:
                        vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
                        hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
                        hcall->ud_expected = false;
                        hcall->expect = HV_STATUS_SUCCESS;
                        break;
                case 19:
                        hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
                        hcall->expect = HV_STATUS_ACCESS_DENIED;
                        break;
                case 20:
                        vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
                        hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
                        hcall->expect = HV_STATUS_INVALID_PARAMETER;
                        break;
                case 21:
                        kvm_vm_free(vm);
                        return;
                }

                vcpu_set_cpuid(vcpu);

                memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

                pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

                vcpu_run(vcpu);
                TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

                switch (get_ucall(vcpu, &uc)) {
                case UCALL_ABORT:
                        REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
                        return;
                case UCALL_DONE:
                        break;
                default:
                        TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
                        return;
                }

                stage++;
                kvm_vm_free(vm);
        }
}

int main(void)
{
        pr_info("Testing access to Hyper-V specific MSRs\n");
        guest_test_msrs_access();

        pr_info("Testing access to Hyper-V hypercalls\n");
        guest_test_hcalls_access();
}