
KVM: X86: Don't unload MMU in kvm_vcpu_flush_tlb_guest()
author		Lai Jiangshan <laijs@linux.alibaba.com>
		Tue, 19 Oct 2021 11:01:54 +0000 (19:01 +0800)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Fri, 22 Oct 2021 09:44:43 +0000 (05:44 -0400)
kvm_mmu_unload() destroys all the PGD caches.  Use the lighter
kvm_mmu_sync_roots() and kvm_mmu_sync_prev_roots() instead.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211019110154.4091-5-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c
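
For context on why kvm_mmu_unload() is the heavy option: it frees the current
root *and* every cached previous root of both MMUs, so the next
kvm_mmu_reload() has to rebuild them all from scratch. A minimal sketch of its
shape around this point in the tree (trimmed; KVM_MMU_ROOTS_ALL is the mask
covering the current root plus all prev_roots slots):

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
        /* Drop the current root and every cached prev_root of both MMUs. */
        kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
        kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
}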

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3456f4d..9ae6168 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -79,6 +79,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
 
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cb7622e..28d0618 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3647,6 +3647,9 @@ static bool is_unsync_root(hpa_t root)
 {
        struct kvm_mmu_page *sp;
 
+       if (!VALID_PAGE(root))
+               return false;
+
        /*
         * The read barrier orders the CPU's read of SPTE.W during the page table
         * walk before the reads of sp->unsync/sp->unsync_children here.
@@ -3714,6 +3717,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
        write_unlock(&vcpu->kvm->mmu_lock);
 }
 
+void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
+{
+       unsigned long roots_to_free = 0;
+       int i;
+
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+               if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
+                       roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+       /* sync prev_roots by simply freeing them */
+       kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
+}
+
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
                                  u32 access, struct x86_exception *exception)
 {
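
The new VALID_PAGE() check in is_unsync_root() is what lets
kvm_mmu_sync_prev_roots() call it blindly on every prev_roots slot: unused
slots hold INVALID_PAGE and must read as "nothing to sync". With the hunks
above applied, the helper reads approximately as follows (the smp_rmb() and
unsync checks are the pre-existing body, abbreviated here):

static bool is_unsync_root(hpa_t root)
{
        struct kvm_mmu_page *sp;

        /* Empty prev_roots slots hold INVALID_PAGE; nothing to sync. */
        if (!VALID_PAGE(root))
                return false;

        /* Read barrier: see the comment quoted in the hunk above. */
        smp_rmb();
        sp = to_shadow_page(root);

        return sp->unsync || sp->unsync_children;
}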
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d25ef7..3a74540 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3245,15 +3245,14 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
        ++vcpu->stat.tlb_flush;
 
        if (!tdp_enabled) {
-               /*
+               /*
                 * A TLB flush on behalf of the guest is equivalent to
                 * INVPCID(all), toggling CR4.PGE, etc., which requires
-                * a forced sync of the shadow page tables.  Unload the
-                * entire MMU here and the subsequent load will sync the
-                * shadow page tables, and also flush the TLB.
+                * a forced sync of the shadow page tables.  Ensure all the
+                * roots are synced and the guest TLB in hardware is clean.
                 */
-               kvm_mmu_unload(vcpu);
-               return;
+               kvm_mmu_sync_roots(vcpu);
+               kvm_mmu_sync_prev_roots(vcpu);
        }
 
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
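
With the hunk applied, kvm_vcpu_flush_tlb_guest() reads as below (assembled
directly from the diff). Note the dropped early return: the current root is
synced in place, any unsync prev_roots are simply freed, and execution now
falls through to the hardware flush instead of relying on a later
kvm_mmu_reload() to flush the TLB on root switch:

static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.tlb_flush;

        if (!tdp_enabled) {
                /*
                 * A TLB flush on behalf of the guest is equivalent to
                 * INVPCID(all), toggling CR4.PGE, etc., which requires
                 * a forced sync of the shadow page tables.  Ensure all the
                 * roots are synced and the guest TLB in hardware is clean.
                 */
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_sync_prev_roots(vcpu);
        }

        static_call(kvm_x86_tlb_flush_guest)(vcpu);
}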