From: Sean Christopherson <seanjc@google.com>
Date: Fri, 26 Mar 2021 02:19:40 +0000 (-0700)
Subject: KVM: x86/mmu: Coalesce TDP MMU TLB flushes when zapping collapsible SPTEs
X-Git-Tag: v5.13-rc1~76^2~152
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=af95b53e56e34a4df343cec32b3a3276d9d06ad3;p=tomoyo%2Ftomoyo-test1.git

KVM: x86/mmu: Coalesce TDP MMU TLB flushes when zapping collapsible SPTEs

When zapping collapsible SPTEs across multiple roots, gather pending
flushes and perform a single remote TLB flush at the end, as opposed to
flushing after processing every root.

Note, flush may be cleared by the result of zap_collapsible_spte_range().
This is intended and correct, e.g. yielding may have serviced a prior
pending flush.

Cc: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index fd5000863678..31459c94290d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1268,21 +1268,21 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static void zap_collapsible_spte_range(struct kvm *kvm,
+static bool zap_collapsible_spte_range(struct kvm *kvm,
                                        struct kvm_mmu_page *root,
-                                       struct kvm_memory_slot *slot)
+                                       struct kvm_memory_slot *slot,
+                                       bool flush)
 {
         gfn_t start = slot->base_gfn;
         gfn_t end = start + slot->npages;
         struct tdp_iter iter;
         kvm_pfn_t pfn;
-        bool spte_set = false;
 
         rcu_read_lock();
 
         tdp_root_for_each_pte(iter, root, start, end) {
-                if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
-                        spte_set = false;
+                if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+                        flush = false;
                         continue;
                 }
 
@@ -1298,12 +1298,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
                 tdp_mmu_set_spte(kvm, &iter, 0);
 
-                spte_set = true;
+                flush = true;
         }
 
         rcu_read_unlock();
 
-        if (spte_set)
-                kvm_flush_remote_tlbs(kvm);
+
+        return flush;
 }
 
 /*
@@ -1314,6 +1314,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                        struct kvm_memory_slot *slot)
 {
         struct kvm_mmu_page *root;
+        bool flush = false;
         int root_as_id;
 
         for_each_tdp_mmu_root_yield_safe(kvm, root) {
@@ -1321,8 +1322,11 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                 if (root_as_id != slot->as_id)
                         continue;
 
-                zap_collapsible_spte_range(kvm, root, slot);
+                flush = zap_collapsible_spte_range(kvm, root, slot, flush);
         }
+
+        if (flush)
+                kvm_flush_remote_tlbs(kvm);
 }
 
 /*
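
For readers outside the KVM tree, the stand-alone C sketch below mimics the flush-coalescing pattern the patch introduces. Every identifier in it (remote_tlb_flush, maybe_yield_and_flush, zap_range_for_root) is a hypothetical stand-in invented for illustration, not a KVM symbol; the real change threads the flag through zap_collapsible_spte_range() and tdp_mmu_iter_cond_resched() exactly as the diff above shows.

/*
 * Stand-alone sketch (not KVM code) of coalescing an expensive "remote
 * flush" across a pass over several roots, flushing once at the end
 * instead of once per root.  A yield may service the pending flush
 * early, in which case the caller's flag is cleared.
 */
#include <stdbool.h>
#include <stdio.h>

static int flush_count;

/* Stand-in for the expensive remote TLB flush. */
static void remote_tlb_flush(void)
{
        flush_count++;
        printf("remote TLB flush #%d\n", flush_count);
}

/*
 * Stand-in for a conditional resched/yield point: if we decide to
 * yield, any pending flush must be serviced first, so the caller
 * treats its pending flush as handled and clears the flag.
 */
static bool maybe_yield_and_flush(bool flush_pending, int step)
{
        if (step % 3 != 0)      /* pretend we only yield every third step */
                return false;
        if (flush_pending)
                remote_tlb_flush();
        return true;            /* yielded; caller clears its flag */
}

/*
 * Per-root work: "zap" a few entries, accumulating the pending flush
 * in the caller's flag instead of flushing before returning (which is
 * the per-root behaviour the patch removes).
 */
static bool zap_range_for_root(int root, bool flush)
{
        for (int step = 1; step <= 5; step++) {
                if (maybe_yield_and_flush(flush, step)) {
                        flush = false;  /* yield serviced the pending flush */
                        continue;
                }
                flush = true;           /* zapped an entry; a flush is owed */
        }
        return flush;
}

int main(void)
{
        bool flush = false;

        /* One pass over all roots; flushes are gathered, not per-root. */
        for (int root = 0; root < 4; root++)
                flush = zap_range_for_root(root, flush);

        /* Single trailing flush, and only if something is still pending. */
        if (flush)
                remote_tlb_flush();

        printf("total remote flushes: %d\n", flush_count);
        return 0;
}

The point of the pattern is that the remote flush runs at most once per pass (plus whenever a yield forces it early), rather than once per root as before.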