
KVM: x86: Allocate vcpu struct in common x86 code
author    Sean Christopherson <sean.j.christopherson@intel.com>
          Wed, 18 Dec 2019 21:54:52 +0000 (13:54 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 24 Jan 2020 08:18:55 +0000 (09:18 +0100)
Move allocation of VMX and SVM vcpus to common x86.  Although the struct
being allocated is technically a VMX/SVM struct, it can be interpreted
directly as a 'struct kvm_vcpu' because of the pre-existing requirement
that 'struct kvm_vcpu' be located at offset zero of the arch/vendor vcpu
struct.
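
As a minimal, self-contained sketch (userspace C with hypothetical stand-in
types, not the kernel structs) of why that requirement makes the cast safe:
C guarantees that the first member of a struct shares the struct's address,
so a pointer to the vendor struct is also a valid 'struct kvm_vcpu' pointer.

	#include <assert.h>
	#include <stddef.h>
	#include <stdlib.h>

	struct kvm_vcpu { int id; };

	struct vcpu_svm {
		struct kvm_vcpu vcpu;	/* must stay the first member */
		int vendor_state;	/* stand-in for SVM-only fields */
	};

	int main(void)
	{
		/* Mirrors the BUILD_BUG_ON() the patch adds. */
		assert(offsetof(struct vcpu_svm, vcpu) == 0);

		/* Common code can allocate the vendor-sized object... */
		struct vcpu_svm *svm = calloc(1, sizeof(*svm));
		if (!svm)
			return 1;

		/* ...and hand it out as a 'struct kvm_vcpu *'. */
		struct kvm_vcpu *vcpu = (struct kvm_vcpu *)svm;
		vcpu->id = 0;

		free(svm);
		return 0;
	}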

Remove the message from the build-time assertions regarding placement of
the struct, as compatibility with the arch usercopy region is no longer
the only thing that depends on 'struct kvm_vcpu' being at offset zero.
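
For reference, both assertion macros come from <linux/build_bug.h>; the
patch drops only the custom message, not the check itself:

	/* Before: build failure with a usercopy-specific explanation. */
	BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
		"struct kvm_vcpu must be at offset 0 for arch usercopy region");

	/* After: same build-time check, generic failure message. */
	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);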

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

arch/x86/include/asm/kvm_host.h
index 0b5c280..aa591a7 100644
@@ -1050,7 +1050,7 @@ struct kvm_x86_ops {
        void (*vm_destroy)(struct kvm *kvm);
 
        /* Create, but do not attach this VCPU */
-       struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+       int (*vcpu_create)(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
 
arch/x86/kvm/svm.c
index b0d9045..319c487 100644
@@ -2187,9 +2187,9 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
        return ret;
 }
 
-static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
+static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
+                          unsigned int id)
 {
-       struct kvm_vcpu *vcpu;
        struct vcpu_svm *svm;
        struct page *page;
        struct page *msrpm_pages;
@@ -2197,22 +2197,15 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        struct page *nested_msrpm_pages;
        int err;
 
-       BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
-               "struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
-       svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-       if (!svm) {
-               err = -ENOMEM;
-               goto out;
-       }
-       vcpu = &svm->vcpu;
+       BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
+       svm = to_svm(vcpu);
 
        vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
                                                GFP_KERNEL_ACCOUNT);
        if (!vcpu->arch.user_fpu) {
                printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
                err = -ENOMEM;
-               goto free_partial_svm;
+               goto out;
        }
 
        vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -2225,7 +2218,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
-               goto free_svm;
+               goto free_guest_fpu;
 
        err = -ENOMEM;
        page = alloc_page(GFP_KERNEL_ACCOUNT);
@@ -2269,7 +2262,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        svm_init_osvw(vcpu);
 
-       return vcpu;
+       return 0;
 
 free_page4:
        __free_page(hsave_page);
@@ -2281,14 +2274,12 @@ free_page1:
        __free_page(page);
 uninit:
        kvm_vcpu_uninit(vcpu);
-free_svm:
+free_guest_fpu:
        kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
 free_user_fpu:
        kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-free_partial_svm:
-       kmem_cache_free(kvm_vcpu_cache, svm);
 out:
-       return ERR_PTR(err);
+       return err;
 }
 
 static void svm_clear_current_vmcb(struct vmcb *vmcb)
@@ -2317,7 +2308,6 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
        kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
-       kmem_cache_free(kvm_vcpu_cache, svm);
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
arch/x86/kvm/vmx/vmx.c
index e2da908..2cbeb0a 100644
@@ -6684,31 +6684,24 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
        kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
-       kmem_cache_free(kvm_vcpu_cache, vmx);
 }
 
-static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
+                          unsigned int id)
 {
-       struct kvm_vcpu *vcpu;
        struct vcpu_vmx *vmx;
        unsigned long *msr_bitmap;
        int i, cpu, err;
 
-       BUILD_BUG_ON_MSG(offsetof(struct vcpu_vmx, vcpu) != 0,
-               "struct kvm_vcpu must be at offset 0 for arch usercopy region");
-
-       vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-       if (!vmx)
-               return ERR_PTR(-ENOMEM);
-
-       vcpu = &vmx->vcpu;
+       BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
+       vmx = to_vmx(vcpu);
 
        vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
                                                GFP_KERNEL_ACCOUNT);
        if (!vcpu->arch.user_fpu) {
                printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
                err = -ENOMEM;
-               goto free_partial_vcpu;
+               goto out;
        }
 
        vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
@@ -6829,7 +6822,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
        vmx->ept_pointer = INVALID_PAGE;
 
-       return vcpu;
+       return 0;
 
 free_vmcs:
        free_loaded_vmcs(vmx->loaded_vmcs);
@@ -6842,9 +6835,8 @@ free_vcpu:
        kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
 free_user_fpu:
        kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
-free_partial_vcpu:
-       kmem_cache_free(kvm_vcpu_cache, vmx);
-       return ERR_PTR(err);
+out:
+       return err;
 }
 
 #define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
arch/x86/kvm/x86.c
index a3eeeb5..cfcefdb 100644
@@ -9172,26 +9172,34 @@ static void fx_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
-       void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
-
        kvmclock_reset(vcpu);
 
        kvm_x86_ops->vcpu_free(vcpu);
-       free_cpumask_var(wbinvd_dirty_mask);
+
+       free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                                unsigned int id)
 {
        struct kvm_vcpu *vcpu;
+       int r;
 
        if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
                printk_once(KERN_WARNING
                "kvm: SMP vm created on host with unstable TSC; "
                "guest TSC will not be reliable\n");
 
-       vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
+       if (!vcpu)
+               return ERR_PTR(-ENOMEM);
 
+       r = kvm_x86_ops->vcpu_create(kvm, vcpu, id);
+       if (r) {
+               kmem_cache_free(kvm_vcpu_cache, vcpu);
+               return ERR_PTR(r);
+       }
        return vcpu;
 }
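
A self-contained illustration (userspace C with hypothetical names; the
kernel uses kmem_cache_zalloc()/kmem_cache_free() and ERR_PTR()) of the
ownership split the x86.c hunk establishes: common code allocates and
frees the container, while the vendor hook only initializes it and
returns 0 or a negative error.

	#include <stdio.h>
	#include <stdlib.h>

	struct kvm_vcpu { unsigned id; };

	/* Stand-in for vcpu_svm/vcpu_vmx with kvm_vcpu at offset zero. */
	struct vcpu_vendor {
		struct kvm_vcpu vcpu;
		char state[64];
	};

	/* New-style hook: fills in a pre-allocated vcpu. */
	static int vendor_vcpu_create(struct kvm_vcpu *vcpu, unsigned id)
	{
		vcpu->id = id;
		return 0;	/* or a negative error, without freeing */
	}

	/* Common code owns the allocation and the error-path free. */
	static struct kvm_vcpu *arch_vcpu_create(unsigned id)
	{
		struct vcpu_vendor *v = calloc(1, sizeof(*v));

		if (!v)
			return NULL;
		if (vendor_vcpu_create(&v->vcpu, id)) {
			free(v);	/* freed where it was allocated */
			return NULL;
		}
		return &v->vcpu;
	}

	int main(void)
	{
		struct kvm_vcpu *vcpu = arch_vcpu_create(0);

		if (vcpu)
			printf("created vcpu %u\n", vcpu->id);
		/* Valid only because the embedded vcpu is at offset zero,
		 * mirroring kvm_arch_vcpu_free() freeing the container. */
		free(vcpu);
		return 0;
	}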