// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "vmx.h"

#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR	0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef
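
/*
 * Note, bits 31:16 of SS_ERROR_CODE are deliberately non-zero, as the VMX
 * side of the test expects KVM to truncate the error code to 16 bits (see
 * the (u16) cast in l1_vmx_code()).
 */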

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception.  AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or
 * INTR, but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading; the name is
 * much more aligned with AMD's behavior.  The SDM, however, is quite clear
 * on the bit's actual behavior.
 */
#define ERROR_CODE_EXT_FLAG	BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG	BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD   ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)
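
/*
 * For reference: SS_VECTOR is 12, so the IDT index portion of the error code
 * is 12 * 8 = 0x60, yielding 0x62 on AMD (IDT flag) and 0x63 on Intel (IDT
 * flag plus the "external event" flag).
 */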

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE 0

#define INTERCEPT_SS		(BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF		(INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF	(INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))
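
/*
 * Note, both SVM's intercept_exceptions field and VMX's EXCEPTION_BITMAP are
 * bitmaps indexed by exception vector, so the masks above can be used
 * verbatim for either vendor, e.g. INTERCEPT_SS_GP_DF is
 * BIT(8) | BIT(12) | BIT(13).
 */

/*
 * Each l2_*_test() below does nothing but GUEST_SYNC() the vector it wants
 * queued next; the exception itself is queued from host userspace via
 * KVM_SET_VCPU_EVENTS (see queue_ss_exception()).
 */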

static void l2_ss_pending_test(void)
{
	GUEST_SYNC(SS_VECTOR);
}

static void l2_ss_injected_gp_test(void)
{
	GUEST_SYNC(GP_VECTOR);
}

static void l2_ss_injected_df_test(void)
{
	GUEST_SYNC(DF_VECTOR);
}

static void l2_ss_injected_tf_test(void)
{
	GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}

static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
		       uint32_t error_code)
{
	struct vmcb *vmcb = svm->vmcb;
	struct vmcb_control_area *ctrl = &vmcb->control;

	vmcb->save.rip = (u64)l2_code;
	run_guest(vmcb, svm->vmcb_gpa);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	/* On exception intercepts, SVM reports the error code in EXITINFO1. */
	GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
	GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
}

static void l1_svm_code(struct svm_test_data *svm)
{
	struct vmcb_control_area *ctrl = &svm->vmcb->control;
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/* Zero the L2 IDT limit so that vectoring any L2 exception #GPs. */
	svm->vmcb->save.idtr.limit = 0;
	ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

	ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
	svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
	svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

	ctrl->intercept_exceptions = INTERCEPT_SS_DF;
	svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	ctrl->intercept_exceptions = INTERCEPT_SS;
	svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

	GUEST_DONE();
}

static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
	GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

	/* The first VM-Enter (the #SS run) must use VMLAUNCH. */
	GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
	GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
}

static void l1_vmx_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

	GUEST_ASSERT_EQ(load_vmcs(vmx), true);

	prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/* Zero the L2 IDT limit so that vectoring any L2 exception #GPs. */
	GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

	/*
	 * VMX disallows injecting an exception with error_code[31:16] != 0,
	 * and hardware will never generate a VM-Exit with bits 31:16 set.
	 * KVM should likewise truncate the "bad" userspace value.
	 */
	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
	vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
	vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
	vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
	vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

	GUEST_DONE();
}

static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
	if (this_cpu_has(X86_FEATURE_SVM))
		l1_svm_code(test_data);
	else
		l1_vmx_code(test_data);
}

static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
	struct ucall uc;

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(vector == uc.args[1],
			    "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
		break;
	case UCALL_DONE:
		TEST_ASSERT(vector == -1,
			    "Expected L2 to ask for %d, L2 says it's done", vector);
		break;
	case UCALL_ABORT:
		TEST_FAIL("%s at %s:%ld (0x%lx != 0x%lx)",
			  (const char *)uc.args[0], __FILE__, uc.args[1],
			  uc.args[2], uc.args[3]);
		break;
	default:
		TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu",
			  vector, uc.cmd);
	}
}
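
/*
 * Queue a #SS for L2 via KVM_SET_VCPU_EVENTS.  A "pending" exception has not
 * yet been delivered and so is subject to L1's exception intercepts, whereas
 * an "injected" exception is already mid-delivery, i.e. bypasses the
 * intercept checks and goes straight to vectoring.
 */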
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	TEST_ASSERT(!events.exception.pending,
		    "Vector %d unexpectedly pending", events.exception.nr);
	TEST_ASSERT(!events.exception.injected,
		    "Vector %d unexpectedly injected", events.exception.nr);

	events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
	events.exception.pending = !inject;
	events.exception.injected = inject;
	events.exception.nr = SS_VECTOR;
	events.exception.has_error_code = true;
	events.exception.error_code = SS_ERROR_CODE;
	vcpu_events_set(vcpu, &events);
}

/*
 * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
 * when an exception is being queued for L2.  Specifically, verify that KVM
 * honors L1 exception intercept controls when a #SS is pending/injected,
 * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
 * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
 */
int main(int argc, char *argv[])
{
	vm_vaddr_t nested_test_data_gva;
	struct kvm_vcpu_events events;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);
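
	/*
	 * Enabling KVM_CAP_EXCEPTION_PAYLOAD is a prerequisite for using
	 * KVM_VCPUEVENT_VALID_PAYLOAD in KVM_SET_VCPU_EVENTS (see
	 * queue_ss_exception()).
	 */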

	if (kvm_cpu_has(X86_FEATURE_SVM))
		vcpu_alloc_svm(vm, &nested_test_data_gva);
	else
		vcpu_alloc_vmx(vm, &nested_test_data_gva);

	vcpu_args_set(vcpu, 1, nested_test_data_gva);

	/* Run L1 => L2.  L2 should sync and request #SS. */
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, SS_VECTOR);

	/* Pend #SS and request immediate exit.  #SS should still be pending. */
	queue_ss_exception(vcpu, false);
	vcpu->run->immediate_exit = true;
	vcpu_run_complete_io(vcpu);
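
	/*
	 * Note, with immediate_exit set, KVM_RUN completes the ucall's
	 * outstanding I/O and returns to userspace without entering the
	 * guest, i.e. without giving KVM a chance to deliver the #SS.
	 */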

	/* Verify the pending #SS comes back out the same as it went in. */
	vcpu_events_get(vcpu, &events);
	ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
		  KVM_VCPUEVENT_VALID_PAYLOAD);
	ASSERT_EQ(events.exception.pending, true);
	ASSERT_EQ(events.exception.nr, SS_VECTOR);
	ASSERT_EQ(events.exception.has_error_code, true);
	ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

	/*
	 * Run for real with the pending #SS, L1 should get a VM-Exit due to
	 * #SS interception and re-enter L2 to request #GP (via injected #SS).
	 */
	vcpu->run->immediate_exit = false;
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, GP_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 should intercept before KVM morphs it to #DF.  L1 should then
	 * disable #GP interception and run L2 to request #DF (via #SS => #GP).
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, DF_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 is no longer intercepting, and so should morph to #DF, which L1
	 * _is_ intercepting.  L1 should then disable #DF interception and run
	 * L2 to request a (fake) TRIPLE_FAULT.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

	/*
	 * Inject #SS yet again.  L1 is not intercepting #GP or #DF, and so
	 * should see nested TRIPLE_FAULT / SHUTDOWN.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, -1);

	kvm_vm_free(vm);
}