KVM: nVMX: do not start the preemption timer hrtimer unnecessarily
[tomoyo/tomoyo-test1.git] / arch/x86/kvm/vmx/nested.c
index be9b2c8..11431bb 100644
@@ -211,7 +211,6 @@ static void free_nested(struct kvm_vcpu *vcpu)
        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
-       hrtimer_cancel(&vmx->nested.preemption_timer);
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
@@ -274,6 +273,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        vcpu_load(vcpu);
+       vmx_leave_nested(vcpu);
        vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
        free_nested(vcpu);
        vcpu_put(vcpu);
@@ -2278,10 +2278,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        }
        vmx_set_rflags(vcpu, vmcs12->guest_rflags);
 
-       vmx->nested.preemption_timer_expired = false;
-       if (nested_cpu_has_preemption_timer(vmcs12))
-               vmx_start_preemption_timer(vcpu);
-
        /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
         * bitwise-or of what L1 wants to trap for L2, and what we want to
         * trap. Note that CR0.TS also needs updating - we do this later.
@@ -3019,6 +3015,15 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
                kvm_make_request(KVM_REQ_EVENT, vcpu);
 
        /*
+        * Do not start the preemption timer hrtimer until after we know
+        * we are successful, so that only nested_vmx_vmexit needs to cancel
+        * the timer.
+        */
+       vmx->nested.preemption_timer_expired = false;
+       if (nested_cpu_has_preemption_timer(vmcs12))
+               vmx_start_preemption_timer(vcpu);
+
+       /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
         * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
         * returned as far as L1 is concerned. It will only return (and set
@@ -3438,13 +3443,10 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        else
                vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
 
-       if (nested_cpu_has_preemption_timer(vmcs12)) {
-               if (vmcs12->vm_exit_controls &
-                   VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+       if (nested_cpu_has_preemption_timer(vmcs12) &&
+           vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
                        vmcs12->vmx_preemption_timer_value =
                                vmx_get_preemption_timer_value(vcpu);
-               hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
-       }
 
        /*
         * In some cases (usually, nested EPT), L2 is allowed to change its
@@ -3852,6 +3854,9 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
        leave_guest_mode(vcpu);
 
+       if (nested_cpu_has_preemption_timer(vmcs12))
+               hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
+
        if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
                vcpu->arch.tsc_offset -= vmcs12->tsc_offset;