OSDN Git Service

kvm: nVMX: Refactor handle_vmon()
authorJim Mattson <jmattson@google.com>
Wed, 30 Nov 2016 20:03:43 +0000 (12:03 -0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 15 Feb 2017 13:54:37 +0000 (14:54 +0100)
handle_vmon is split into two parts: the part that handles the VMXON
instruction, and the part that modifies the vCPU state to transition
from legacy mode to VMX operation. The latter will be used when
restoring the checkpointed state of a vCPU that was in VMX operation
when a snapshot was taken.

Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx.c

index 4b4b59b..33cb8d2 100644 (file)
@@ -7124,6 +7124,53 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
        return 0;
 }
 
+static int enter_vmx_operation(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct vmcs *shadow_vmcs;
+
+       if (cpu_has_vmx_msr_bitmap()) {
+               vmx->nested.msr_bitmap =
+                               (unsigned long *)__get_free_page(GFP_KERNEL);
+               if (!vmx->nested.msr_bitmap)
+                       goto out_msr_bitmap;
+       }
+
+       vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+       if (!vmx->nested.cached_vmcs12)
+               goto out_cached_vmcs12;
+
+       if (enable_shadow_vmcs) {
+               shadow_vmcs = alloc_vmcs();
+               if (!shadow_vmcs)
+                       goto out_shadow_vmcs;
+               /* mark vmcs as shadow */
+               shadow_vmcs->revision_id |= (1u << 31);
+               /* init shadow vmcs */
+               vmcs_clear(shadow_vmcs);
+               vmx->vmcs01.shadow_vmcs = shadow_vmcs;
+       }
+
+       INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
+       vmx->nested.vmcs02_num = 0;
+
+       hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL_PINNED);
+       vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
+
+       vmx->nested.vmxon = true;
+       return 0;
+
+out_shadow_vmcs:
+       kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+       free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+       return -ENOMEM;
+}
+
 /*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
@@ -7134,9 +7181,9 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
  */
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
+       int ret;
        struct kvm_segment cs;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct vmcs *shadow_vmcs;
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
@@ -7176,49 +7223,13 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 
        if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
                return 1;
-
-       if (cpu_has_vmx_msr_bitmap()) {
-               vmx->nested.msr_bitmap =
-                               (unsigned long *)__get_free_page(GFP_KERNEL);
-               if (!vmx->nested.msr_bitmap)
-                       goto out_msr_bitmap;
-       }
-
-       vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
-       if (!vmx->nested.cached_vmcs12)
-               goto out_cached_vmcs12;
-
-       if (enable_shadow_vmcs) {
-               shadow_vmcs = alloc_vmcs();
-               if (!shadow_vmcs)
-                       goto out_shadow_vmcs;
-               /* mark vmcs as shadow */
-               shadow_vmcs->revision_id |= (1u << 31);
-               /* init shadow vmcs */
-               vmcs_clear(shadow_vmcs);
-               vmx->vmcs01.shadow_vmcs = shadow_vmcs;
-       }
-
-       INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
-       vmx->nested.vmcs02_num = 0;
-
-       hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
-                    HRTIMER_MODE_REL_PINNED);
-       vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
-
-       vmx->nested.vmxon = true;
+       ret = enter_vmx_operation(vcpu);
+       if (ret)
+               return ret;
 
        nested_vmx_succeed(vcpu);
        return kvm_skip_emulated_instruction(vcpu);
-
-out_shadow_vmcs:
-       kfree(vmx->nested.cached_vmcs12);
-
-out_cached_vmcs12:
-       free_page((unsigned long)vmx->nested.msr_bitmap);
-
-out_msr_bitmap:
-       return -ENOMEM;
 }
 
 /*