KVM: nSVM: refactor nested_svm_vmrun
author Ladi Prosek <lprosek@redhat.com>
Wed, 11 Oct 2017 14:54:44 +0000 (16:54 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 12 Oct 2017 12:01:56 +0000 (14:01 +0200)
Analogous to 858e25c06fb0 ("kvm: nVMX: Refactor nested_vmx_run()"), this commit splits
nested_svm_vmrun into two parts. The newly introduced enter_svm_guest_mode modifies the
vcpu state to transition from L1 to L2, while the code left in nested_svm_vmrun handles
the VMRUN instruction.

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
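
For orientation, the resulting control flow after the split looks roughly as follows. This is a condensed sketch of the code in the diff below, not the verbatim kernel source; the error path is simplified (the real code also reports SVM_EXIT_ERR and unmaps the page when the checks fail).

static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;
	u64 vmcb_gpa = svm->vmcb->save.rax;

	/* Map the L1 guest's VMCB and validate it; bail out on failure. */
	nested_vmcb = nested_svm_map(svm, vmcb_gpa, &page);
	if (!nested_vmcb || !nested_vmcb_checks(nested_vmcb))
		return false;

	/*
	 * Trace the VMRUN, clear pending exceptions/interrupts, and save
	 * the L1 state into svm->nested.hsave so it can be restored on
	 * VMEXIT (elided here; see the diff).
	 */

	/* Load the L2 state and switch the vcpu from L1 to L2. */
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);

	return true;
}

In other words, nested_svm_vmrun keeps the instruction-level work (mapping, validation, saving the host state), and the actual L1-to-L2 state transition now lives in enter_svm_guest_mode, mirroring the nested_vmx_run() split referenced above.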
arch/x86/kvm/svm.c

index e3c61a3..6edefab 100644
@@ -2924,70 +2924,9 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
        return true;
 }
 
-static bool nested_svm_vmrun(struct vcpu_svm *svm)
+static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+                                struct vmcb *nested_vmcb, struct page *page)
 {
-       struct vmcb *nested_vmcb;
-       struct vmcb *hsave = svm->nested.hsave;
-       struct vmcb *vmcb = svm->vmcb;
-       struct page *page;
-       u64 vmcb_gpa;
-
-       vmcb_gpa = svm->vmcb->save.rax;
-
-       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
-       if (!nested_vmcb)
-               return false;
-
-       if (!nested_vmcb_checks(nested_vmcb)) {
-               nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
-               nested_vmcb->control.exit_code_hi = 0;
-               nested_vmcb->control.exit_info_1  = 0;
-               nested_vmcb->control.exit_info_2  = 0;
-
-               nested_svm_unmap(page);
-
-               return false;
-       }
-
-       trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
-                              nested_vmcb->save.rip,
-                              nested_vmcb->control.int_ctl,
-                              nested_vmcb->control.event_inj,
-                              nested_vmcb->control.nested_ctl);
-
-       trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
-                                   nested_vmcb->control.intercept_cr >> 16,
-                                   nested_vmcb->control.intercept_exceptions,
-                                   nested_vmcb->control.intercept);
-
-       /* Clear internal status */
-       kvm_clear_exception_queue(&svm->vcpu);
-       kvm_clear_interrupt_queue(&svm->vcpu);
-
-       /*
-        * Save the old vmcb, so we don't need to pick what we save, but can
-        * restore everything when a VMEXIT occurs
-        */
-       hsave->save.es     = vmcb->save.es;
-       hsave->save.cs     = vmcb->save.cs;
-       hsave->save.ss     = vmcb->save.ss;
-       hsave->save.ds     = vmcb->save.ds;
-       hsave->save.gdtr   = vmcb->save.gdtr;
-       hsave->save.idtr   = vmcb->save.idtr;
-       hsave->save.efer   = svm->vcpu.arch.efer;
-       hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
-       hsave->save.cr4    = svm->vcpu.arch.cr4;
-       hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
-       hsave->save.rip    = kvm_rip_read(&svm->vcpu);
-       hsave->save.rsp    = vmcb->save.rsp;
-       hsave->save.rax    = vmcb->save.rax;
-       if (npt_enabled)
-               hsave->save.cr3    = vmcb->save.cr3;
-       else
-               hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
-
-       copy_vmcb_control_area(hsave, vmcb);
-
        if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
                svm->vcpu.arch.hflags |= HF_HIF_MASK;
        else
@@ -3080,6 +3019,73 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
        enable_gif(svm);
 
        mark_all_dirty(svm->vmcb);
+}
+
+static bool nested_svm_vmrun(struct vcpu_svm *svm)
+{
+       struct vmcb *nested_vmcb;
+       struct vmcb *hsave = svm->nested.hsave;
+       struct vmcb *vmcb = svm->vmcb;
+       struct page *page;
+       u64 vmcb_gpa;
+
+       vmcb_gpa = svm->vmcb->save.rax;
+
+       nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
+       if (!nested_vmcb)
+               return false;
+
+       if (!nested_vmcb_checks(nested_vmcb)) {
+               nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
+               nested_vmcb->control.exit_code_hi = 0;
+               nested_vmcb->control.exit_info_1  = 0;
+               nested_vmcb->control.exit_info_2  = 0;
+
+               nested_svm_unmap(page);
+
+               return false;
+       }
+
+       trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
+                              nested_vmcb->save.rip,
+                              nested_vmcb->control.int_ctl,
+                              nested_vmcb->control.event_inj,
+                              nested_vmcb->control.nested_ctl);
+
+       trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
+                                   nested_vmcb->control.intercept_cr >> 16,
+                                   nested_vmcb->control.intercept_exceptions,
+                                   nested_vmcb->control.intercept);
+
+       /* Clear internal status */
+       kvm_clear_exception_queue(&svm->vcpu);
+       kvm_clear_interrupt_queue(&svm->vcpu);
+
+       /*
+        * Save the old vmcb, so we don't need to pick what we save, but can
+        * restore everything when a VMEXIT occurs
+        */
+       hsave->save.es     = vmcb->save.es;
+       hsave->save.cs     = vmcb->save.cs;
+       hsave->save.ss     = vmcb->save.ss;
+       hsave->save.ds     = vmcb->save.ds;
+       hsave->save.gdtr   = vmcb->save.gdtr;
+       hsave->save.idtr   = vmcb->save.idtr;
+       hsave->save.efer   = svm->vcpu.arch.efer;
+       hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
+       hsave->save.cr4    = svm->vcpu.arch.cr4;
+       hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
+       hsave->save.rip    = kvm_rip_read(&svm->vcpu);
+       hsave->save.rsp    = vmcb->save.rsp;
+       hsave->save.rax    = vmcb->save.rax;
+       if (npt_enabled)
+               hsave->save.cr3    = vmcb->save.cr3;
+       else
+               hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
+
+       copy_vmcb_control_area(hsave, vmcb);
+
+       enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
 
        return true;
 }