OSDN Git Service

kvm: x86: Introduce APICv inhibit reason bits
authorSuravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Thu, 14 Nov 2019 20:15:05 +0000 (14:15 -0600)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 5 Feb 2020 14:17:40 +0000 (15:17 +0100)
There are several reasons why a VM needs to deactivate APICv,
e.g. APICv is disabled via a parameter during module loading, or when
Hyper-V SynIC support is enabled. Additional inhibit reasons will be
introduced later on when dynamic APICv is supported.

Introduce KVM APICv inhibit reason bits along with a new variable,
apicv_inhibit_reasons, to help keep track of APICv state for each VM.

Initially, the APICV_INHIBIT_REASON_DISABLE bit is used to indicate
the case where APICv is disabled during KVM module load.
(e.g. insmod kvm_amd avic=0 or insmod kvm_intel enable_apicv=0).

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
[Do not use get_enable_apicv; consider irqchip_split in svm.c. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 329d01c..4d57e4b 100644 (file)
@@ -873,6 +873,8 @@ enum kvm_irqchip_mode {
        KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+#define APICV_INHIBIT_REASON_DISABLE    0
+
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
        unsigned long n_requested_mmu_pages;
@@ -904,6 +906,7 @@ struct kvm_arch {
        struct kvm_apic_map *apic_map;
 
        bool apic_access_page_done;
+       unsigned long apicv_inhibit_reasons;
 
        gpa_t wall_clock;
 
@@ -1478,6 +1481,8 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);
 
 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+bool kvm_apicv_activated(struct kvm *kvm);
+void kvm_apicv_init(struct kvm *kvm, bool enable);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
index 9dbb990..ed39f72 100644 (file)
@@ -2052,6 +2052,18 @@ free_avic:
        return err;
 }
 
+static int svm_vm_init(struct kvm *kvm)
+{
+       if (avic) {
+               int ret = avic_vm_init(kvm);
+               if (ret)
+                       return ret;
+       }
+
+       kvm_apicv_init(kvm, avic && irqchip_split(kvm));
+       return 0;
+}
+
 static inline int
 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 {
@@ -7274,7 +7286,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 
        .vm_alloc = svm_vm_alloc,
        .vm_free = svm_vm_free,
-       .vm_init = avic_vm_init,
+       .vm_init = svm_vm_init,
        .vm_destroy = svm_vm_destroy,
 
        .prepare_guest_switch = svm_prepare_guest_switch,
index c475fa2..69bd10a 100644 (file)
@@ -6813,6 +6813,7 @@ static int vmx_vm_init(struct kvm *kvm)
                        break;
                }
        }
+       kvm_apicv_init(kvm, enable_apicv);
        return 0;
 }
 
index 2d3be7f..98209b8 100644 (file)
@@ -7469,6 +7469,23 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
        kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
 }
 
+bool kvm_apicv_activated(struct kvm *kvm)
+{
+       return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+
+void kvm_apicv_init(struct kvm *kvm, bool enable)
+{
+       if (enable)
+               clear_bit(APICV_INHIBIT_REASON_DISABLE,
+                         &kvm->arch.apicv_inhibit_reasons);
+       else
+               set_bit(APICV_INHIBIT_REASON_DISABLE,
+                       &kvm->arch.apicv_inhibit_reasons);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_init);
+
 static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
 {
        struct kvm_vcpu *target = NULL;
@@ -9219,10 +9236,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                return r;
 
        if (irqchip_in_kernel(vcpu->kvm)) {
-               vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
                r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
                if (r < 0)
                        goto fail_mmu_destroy;
+               if (kvm_apicv_activated(vcpu->kvm))
+                       vcpu->arch.apicv_active = true;
        } else
                static_key_slow_inc(&kvm_no_apic_vcpu);