
kvm: x86: Skip shadow page resync on CR3 switch when indicated by guest
author    Junaid Shahid <junaids@google.com>
          Wed, 27 Jun 2018 21:59:18 +0000 (14:59 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Mon, 6 Aug 2018 15:59:00 +0000 (17:59 +0200)
When the guest indicates that the TLB doesn't need to be flushed in a
CR3 switch, we can also skip resyncing the shadow page tables since an
out-of-sync shadow page table is equivalent to an out-of-sync TLB.
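For context, with CR4.PCIDE=1 the guest gives this hint by setting bit 63 of
the value it writes to CR3. A minimal guest-side sketch of such a write
(hypothetical helper name; freestanding x86-64 C, not code from this patch):

#include <stdint.h>

/*
 * With PCIDs enabled, bit 63 of a CR3 write asks the CPU not to flush
 * TLB entries tagged with the new PCID. This patch lets KVM extend the
 * same hint to the shadow-page resync. (Hypothetical helper.)
 */
#define CR3_NOFLUSH (1ULL << 63)

static inline void write_cr3_noflush(uint64_t pgd_pa, uint64_t pcid)
{
	uint64_t cr3 = pgd_pa | pcid | CR3_NOFLUSH;

	__asm__ volatile("mov %0, %%cr3" : : "r"(cr3) : "memory");
}

KVM sees this hint as the skip_tlb_flush argument in fast_cr3_switch() below.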

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0f6965c..9446a36 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4098,9 +4098,19 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
                         */
 
                        kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
-                       kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-                       if (!skip_tlb_flush)
+                       if (!skip_tlb_flush) {
+                               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                                kvm_x86_ops->tlb_flush(vcpu, true);
+                       }
+
+                       /*
+                        * The last MMIO access's GVA and GPA are cached in the
+                        * VCPU. When switching to a new CR3, that GVA->GPA
+                        * mapping may no longer be valid. So clear any cached
+                        * MMIO info even when we don't need to sync the shadow
+                        * page tables.
+                        */
+                       vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
                        __clear_sp_write_flooding_count(
                                page_header(mmu->root_hpa));
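As a toy illustration of the hazard that the new vcpu_clear_mmio_info() call
avoids, here is a simplified model (hypothetical structures, not KVM's real
cache, which lives in vcpu->arch.mmio_gva and friends):

#include <stdbool.h>
#include <stdint.h>

/*
 * Toy model of the per-vCPU "last MMIO access" cache. The cache is keyed
 * by GVA alone, so after a CR3 switch the cached GVA may translate to a
 * different GPA, and a lookup hit would be wrong unless the cache was
 * cleared, even when the shadow-page resync itself is skipped.
 */
struct mmio_cache { uint64_t gva, gpa; bool valid; };

static bool mmio_cache_hit(const struct mmio_cache *c, uint64_t gva,
			   uint64_t *gpa)
{
	if (!c->valid || c->gva != gva)
		return false;
	*gpa = c->gpa;		/* stale if a CR3 switch happened in between */
	return true;
}

/* The effect of vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY) in this model. */
static void mmio_cache_clear(struct mmio_cache *c)
{
	c->valid = false;
}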
@@ -5217,6 +5227,21 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
        struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
        mmu->invlpg(vcpu, gva, mmu->root_hpa);
+
+       /*
+        * INVLPG is required to invalidate any global mappings for the VA,
+        * irrespective of PCID. Since it would take roughly the same amount
+        * of work to determine whether the prev_root mapping of the VA is
+        * marked global as to just sync it blindly, we might as well always
+        * sync it.
+        *
+        * Mappings not reachable via the current cr3 or the prev_root.cr3 will
+        * be synced when switching to that cr3, so nothing needs to be done
+        * here for them.
+        */
+       if (VALID_PAGE(mmu->prev_root.hpa))
+               mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
+
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        ++vcpu->stat.invlpg;
 }
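The first comment in the hunk above leans on an architectural rule: INVLPG
invalidates the address's translations in every PCID, global mappings
included. A guest-side sketch of the instruction being emulated here
(hypothetical helper; freestanding x86-64 C):

#include <stdint.h>

/*
 * One INVLPG must drop the VA's translations in all PCIDs, global
 * mappings included. A stale entry for this VA in KVM's cached
 * prev_root would otherwise behave exactly like a TLB entry that
 * survived the INVLPG. (Hypothetical helper.)
 */
static inline void guest_invlpg(void *va)
{
	__asm__ volatile("invlpg (%0)" : : "r"(va) : "memory");
}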
@@ -5232,8 +5257,10 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
        }
 
        if (VALID_PAGE(mmu->prev_root.hpa) &&
-           pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3))
+           pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) {
+               mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+       }
 
        ++vcpu->stat.invlpg;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6151418..b812100 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8821,7 +8821,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 
                if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_root.cr3)
                    == operand.pcid)
-                       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+                       kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_PREVIOUS);
 
                /*
                 * If neither the current cr3 nor the prev_root.cr3 use the
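For reference, the guest reaches this path via a type 1 (single-context)
INVPCID. A sketch of such an invocation, mirroring the architectural
descriptor layout (hypothetical helper; freestanding x86-64 C, assumes the
CPU advertises INVPCID):

#include <stdint.h>

/*
 * INVPCID descriptor: bits 11:0 hold the PCID, bits 63:12 are reserved,
 * and the second quadword holds a linear address (ignored for type 1).
 */
struct invpcid_desc { uint64_t pcid_rsvd, addr; };

static inline void invpcid_single_context(uint64_t pcid)
{
	struct invpcid_desc desc = { .pcid_rsvd = pcid & 0xfff };

	/* type 1: drop all non-global translations tagged with this PCID */
	__asm__ volatile("invpcid %0, %1"
			 : : "m"(desc), "r"((uint64_t)1) : "memory");
}

With unsynced shadow pages now possible, merely flushing the TLB is no longer
enough for this case, hence the switch to freeing the cached previous root.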
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 493afbf..aa5d96b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -858,10 +858,10 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 #endif
 
        if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
-               kvm_mmu_sync_roots(vcpu);
-
-               if (!skip_tlb_flush)
+               if (!skip_tlb_flush) {
+                       kvm_mmu_sync_roots(vcpu);
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               }
                return 0;
        }
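The common trigger for this fast path is a guest rewriting an unchanged CR3
with the no-flush bit set, for example when switching back to the same
address space under PCID. A sketch (hypothetical helper; freestanding
x86-64 C):

#include <stdint.h>

#define CR3_NOFLUSH (1ULL << 63)

/*
 * Rewriting the current CR3 with bit 63 set: with this patch, the
 * kvm_set_cr3() fast path above skips both kvm_mmu_sync_roots() and the
 * KVM_REQ_TLB_FLUSH request for such a write. (Hypothetical helper.)
 */
static inline void reload_cr3_noflush(void)
{
	uint64_t cr3;

	__asm__ volatile("mov %%cr3, %0" : "=r"(cr3));
	__asm__ volatile("mov %0, %%cr3" : : "r"(cr3 | CR3_NOFLUSH) : "memory");
}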