KVM: SVM: Implement check_nested_events for NMI
author Cathy Avery <cavery@redhat.com>
Tue, 14 Apr 2020 20:11:06 +0000 (16:11 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 13 May 2020 16:14:24 +0000 (12:14 -0400)
Migrate nested guest NMI intercept processing
to new check_nested_events.

Signed-off-by: Cathy Avery <cavery@redhat.com>
Message-Id: <20200414201107.22952-2-cavery@redhat.com>
[Reorder clauses as NMIs have higher priority than IRQs; inject
 immediate vmexit as is now done for IRQ vmexits. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

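For orientation, here is a condensed sketch of how svm_check_nested_events() reads after this patch, pieced together from the hunks below; the enclosing declarations and the final return are not shown in the diff and are assumed here, so treat this as a reading aid rather than the authoritative code:

	static int svm_check_nested_events(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);
		bool block_nested_events =
			kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
			svm->nested.nested_run_pending;

		/* NMIs are checked first, since they have higher priority than IRQs. */
		if (vcpu->arch.nmi_pending && nested_exit_on_nmi(svm)) {
			if (block_nested_events)
				return -EBUSY;
			nested_svm_nmi(svm);	/* immediate SVM_EXIT_NMI vmexit */
			return 0;
		}

		/* Then pending interrupts, as before. */
		if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
			if (block_nested_events)
				return -EBUSY;
			nested_svm_intr(svm);
		}

		return 0;
	}
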
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 4654668..3f268a3 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -799,6 +799,20 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
        return vmexit;
 }
 
+static bool nested_exit_on_nmi(struct vcpu_svm *svm)
+{
+       return (svm->nested.intercept & (1ULL << INTERCEPT_NMI));
+}
+
+static void nested_svm_nmi(struct vcpu_svm *svm)
+{
+       svm->vmcb->control.exit_code = SVM_EXIT_NMI;
+       svm->vmcb->control.exit_info_1 = 0;
+       svm->vmcb->control.exit_info_2 = 0;
+
+       nested_svm_vmexit(svm);
+}
+
 static void nested_svm_intr(struct vcpu_svm *svm)
 {
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
@@ -822,6 +836,13 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
                kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
                svm->nested.nested_run_pending;
 
+       if (vcpu->arch.nmi_pending && nested_exit_on_nmi(svm)) {
+               if (block_nested_events)
+                       return -EBUSY;
+               nested_svm_nmi(svm);
+               return 0;
+       }
+
        if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
                if (block_nested_events)
                        return -EBUSY;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c2a4e2d..f97f29e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3070,9 +3070,10 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        int ret;
+
        ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
              !(svm->vcpu.arch.hflags & HF_NMI_MASK);
-       ret = ret && gif_set(svm) && nested_svm_nmi(svm);
+       ret = ret && gif_set(svm);
 
        return ret;
 }
@@ -3150,9 +3151,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
                return; /* STGI will cause a vm exit */
        }
 
-       if (svm->nested.exit_required)
-               return; /* we're not going to run the guest yet */
-
        /*
         * Something prevents NMI from been injected. Single step over possible
         * problem (IRET or exception injection or interrupt shadow)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 435f332..a2bc33a 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -373,21 +373,6 @@ void disable_nmi_singlestep(struct vcpu_svm *svm);
 #define NESTED_EXIT_DONE       1       /* Exit caused nested vmexit  */
 #define NESTED_EXIT_CONTINUE   2       /* Further checks needed      */
 
-/* This function returns true if it is save to enable the nmi window */
-static inline bool nested_svm_nmi(struct vcpu_svm *svm)
-{
-       if (!is_guest_mode(&svm->vcpu))
-               return true;
-
-       if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
-               return true;
-
-       svm->vmcb->control.exit_code = SVM_EXIT_NMI;
-       svm->nested.exit_required = true;
-
-       return false;
-}
-
 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
 {
        return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);