	KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
};
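+/*
+ * APICv inhibit reasons: while any bit is set in apicv_inhibit_reasons,
+ * APIC virtualization stays deactivated for the VM.  Bit 0 is the
+ * generic "disabled" reason, e.g. set when the module parameter turns
+ * APICv off.
+ */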
+#define APICV_INHIBIT_REASON_DISABLE 0
+
struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	struct kvm_apic_map *apic_map;
	bool apic_access_page_done;
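+	/* Bitmask of APICV_INHIBIT_REASON_* bits; APICv is active iff zero. */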
+	unsigned long apicv_inhibit_reasons;
	gpa_t wall_clock;
struct x86_exception *exception);
void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+bool kvm_apicv_activated(struct kvm *kvm);
+void kvm_apicv_init(struct kvm *kvm, bool enable);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
	return err;
}
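+/*
+ * Called at VM creation: set up AVIC data structures when the avic
+ * module parameter is enabled, then record the VM's initial APICv state.
+ */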
+static int svm_vm_init(struct kvm *kvm)
+{
+	if (avic) {
+		int ret = avic_vm_init(kvm);
+		if (ret)
+			return ret;
+	}
+
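+	/*
+	 * The irqchip mode is not yet known at VM creation time, so base
+	 * the initial APICv state solely on the avic module parameter.
+	 */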
+	kvm_apicv_init(kvm, avic);
+	return 0;
+}
+
static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
	.vm_alloc = svm_vm_alloc,
	.vm_free = svm_vm_free,
-	.vm_init = avic_vm_init,
+	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,
	.prepare_guest_switch = svm_prepare_guest_switch,
			break;
		}
	}
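+	/* Base the VM's initial APICv state on the enable_apicv module parameter. */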
+	kvm_apicv_init(kvm, enable_apicv);
	return 0;
}
	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}
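+/*
+ * APICv is considered activated for a VM only while no inhibit reason
+ * is set.  READ_ONCE() ensures a single, untorn read of the bitmask.
+ */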
+bool kvm_apicv_activated(struct kvm *kvm)
+{
+	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+
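+/*
+ * Seed the inhibit bitmask at VM creation: clearing the DISABLE bit
+ * starts the VM with APICv activated; setting it keeps APICv off.
+ */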
+void kvm_apicv_init(struct kvm *kvm, bool enable)
+{
+	if (enable)
+		clear_bit(APICV_INHIBIT_REASON_DISABLE,
+			  &kvm->arch.apicv_inhibit_reasons);
+	else
+		set_bit(APICV_INHIBIT_REASON_DISABLE,
+			&kvm->arch.apicv_inhibit_reasons);
+}
+EXPORT_SYMBOL_GPL(kvm_apicv_init);
+
static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
{
	struct kvm_vcpu *target = NULL;
		return r;

	if (irqchip_in_kernel(vcpu->kvm)) {
-		vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu->kvm);
		r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
		if (r < 0)
			goto fail_mmu_destroy;
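+		/* Reflect the VM-wide APICv state on this newly created vCPU. */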
+		if (kvm_apicv_activated(vcpu->kvm))
+			vcpu->arch.apicv_active = true;
	} else
		static_key_slow_inc(&kvm_no_apic_vcpu);