OSDN Git Service

KVM: x86: introduce ISA specific smi_allowed callback
author: Ladi Prosek <lprosek@redhat.com>
Wed, 11 Oct 2017 14:54:41 +0000 (16:54 +0200)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 12 Oct 2017 12:01:55 +0000 (14:01 +0200)
Similar to NMI, there may be ISA specific reasons why an SMI cannot be
injected into the guest. This commit adds a new smi_allowed callback to
be implemented in following commits.

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

index 23a9a53..411ddbb 100644 (file)
@@ -1062,6 +1062,7 @@ struct kvm_x86_ops {
 
        void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+       int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
        int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
 };
index c4e9b99..e3c61a3 100644 (file)
@@ -5401,6 +5401,11 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
        vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        /* TODO: Implement */
@@ -5524,6 +5529,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .update_pi_irte = svm_update_pi_irte,
        .setup_mce = svm_setup_mce,
 
+       .smi_allowed = svm_smi_allowed,
        .pre_enter_smm = svm_pre_enter_smm,
        .pre_leave_smm = svm_pre_leave_smm,
 };
index 1305bb6..156ecba 100644 (file)
@@ -11916,6 +11916,11 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
                        ~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        /* TODO: Implement */
@@ -12054,6 +12059,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
        .setup_mce = vmx_setup_mce,
 
+       .smi_allowed = vmx_smi_allowed,
        .pre_enter_smm = vmx_pre_enter_smm,
        .pre_leave_smm = vmx_pre_leave_smm,
 };
index 9e85a69..693bf8d 100644 (file)
@@ -6438,7 +6438,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                }
 
                kvm_x86_ops->queue_exception(vcpu);
-       } else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
+       } else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
                vcpu->arch.smi_pending = false;
                enter_smm(vcpu);
        } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {