kvm: x86: Make sync_page() flush remote TLBs once only
Author:     Junaid Shahid <junaids@google.com>
AuthorDate: Wed, 27 Jun 2018 21:59:04 +0000 (14:59 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 6 Aug 2018 15:58:49 +0000 (17:58 +0200)
sync_page() calls set_spte() from a loop across a page table. It would
work better if set_spte() left the TLB flushing to its callers, so that
sync_page() can aggregate the flushes into a single call.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
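
The shape of this change is a general batching pattern: instead of issuing an
expensive side effect itself, a per-entry helper returns a bitmask of deferred
actions, and the caller ORs the results together so each action runs at most
once per batch. Below is a minimal standalone sketch of that pattern; the
names update_entry, NEED_REMOTE_FLUSH and flush_all are illustrative
placeholders, not the kernel's API.

    #include <stdint.h>
    #include <stdio.h>

    /* Deferred-action bits a helper may hand back to its caller
     * (analogous to the SET_SPTE_* bits below; these names are made up). */
    #define NEED_REMOTE_FLUSH (1u << 0)

    static unsigned int update_entry(uint64_t *entry, uint64_t new_val)
    {
            unsigned int ret = 0;

            /* Pretend that changing a live entry requires a flush. */
            if (*entry != new_val)
                    ret |= NEED_REMOTE_FLUSH;
            *entry = new_val;
            return ret;
    }

    static void flush_all(void)
    {
            puts("expensive flush, issued once per batch");
    }

    int main(void)
    {
            uint64_t table[4] = { 1, 2, 3, 4 };
            unsigned int ret = 0;
            int i;

            /* Aggregate the deferred-action bits across the whole table ... */
            for (i = 0; i < 4; i++)
                    ret |= update_entry(&table[i], 0);

            /* ... then do the expensive work at most once, as sync_page()
             * does after this patch. */
            if (ret & NEED_REMOTE_FLUSH)
                    flush_all();
            return 0;
    }

Before this patch the flush could run once per loop iteration, inside
set_spte(); afterward it runs at most once per sync_page() call.
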
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690..75bc73e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2724,6 +2724,10 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
        return true;
 }
 
+/* Bits which may be returned by set_spte() */
+#define SET_SPTE_WRITE_PROTECTED_PT    BIT(0)
+#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int level,
                    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
@@ -2800,7 +2804,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
                        pgprintk("%s: found shadow page for %llx, marking ro\n",
                                 __func__, gfn);
-                       ret = 1;
+                       ret |= SET_SPTE_WRITE_PROTECTED_PT;
                        pte_access &= ~ACC_WRITE_MASK;
                        spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
                }
@@ -2816,7 +2820,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 set_pte:
        if (mmu_spte_update(sptep, spte))
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
 done:
        return ret;
 }
@@ -2827,6 +2831,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 {
        int was_rmapped = 0;
        int rmap_count;
+       int set_spte_ret;
        int ret = RET_PF_RETRY;
 
        pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
@@ -2854,12 +2859,15 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
                        was_rmapped = 1;
        }
 
-       if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
-             true, host_writable)) {
+       set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
+                               speculative, true, host_writable);
+       if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write_fault)
                        ret = RET_PF_EMULATE;
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
+       if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
+               kvm_flush_remote_tlbs(vcpu->kvm);
 
        if (unlikely(is_mmio_spte(*sptep)))
                ret = RET_PF_EMULATE;
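
Note that mmu_set_spte(), which updates a single SPTE, now tests each bit
separately: SET_SPTE_WRITE_PROTECTED_PT keeps the old behavior (request a
local TLB flush and emulate on a write fault), while
SET_SPTE_NEED_REMOTE_TLB_FLUSH re-issues the kvm_flush_remote_tlbs() call
that set_spte() previously made itself, so behavior for this caller is
unchanged.
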
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 74996fc..208f764 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -968,6 +968,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
        int i, nr_present = 0;
        bool host_writable;
        gpa_t first_pte_gpa;
+       int set_spte_ret = 0;
 
        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);
@@ -1024,12 +1025,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
 
-               set_spte(vcpu, &sp->spt[i], pte_access,
-                        PT_PAGE_TABLE_LEVEL, gfn,
-                        spte_to_pfn(sp->spt[i]), true, false,
-                        host_writable);
+               set_spte_ret |= set_spte(vcpu, &sp->spt[i],
+                                        pte_access, PT_PAGE_TABLE_LEVEL,
+                                        gfn, spte_to_pfn(sp->spt[i]),
+                                        true, false, host_writable);
        }
 
+       if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
+               kvm_flush_remote_tlbs(vcpu->kvm);
+
        return nr_present;
 }
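
A design note, reasoning from the diff: deferring the flush is safe because
nothing inside this loop depends on stale remote TLB entries already being
gone; the flush only has to happen once the batch of SPTE updates is
complete. The payoff is that syncing a fully populated shadow page issues at
most one remote TLB shootdown instead of up to one per present entry.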