From 7f42aa76d4a5587939ee98d541ea1b97b382f408 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Tue, 18 Feb 2020 13:07:36 -0800
Subject: [PATCH] KVM: x86/mmu: Consolidate open coded variants of memslot TLB flushes

Replace open coded instances of kvm_arch_flush_remote_tlbs_memslot()'s
functionality with calls to the aforementioned function.  Update the
comment in kvm_arch_flush_remote_tlbs_memslot() to elaborate on how it
is used and why it asserts that slots_lock is held.

No functional change intended.

Signed-off-by: Sean Christopherson
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c | 34 +++++++++-------------------------
 1 file changed, 9 insertions(+), 25 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7268c6ffb643..c4e0b97f82ac 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5863,13 +5863,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 
 	/*
-	 * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
-	 * which do tlb flush out of mmu-lock should be serialized by
-	 * kvm->slots_lock otherwise tlb flush would be missed.
-	 */
-	lockdep_assert_held(&kvm->slots_lock);
-
-	/*
 	 * We can flush all the TLBs out of the mmu lock without TLB
 	 * corruption since we just change the spte from writable to
 	 * readonly so that we only need to care the case of changing
@@ -5881,8 +5874,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	 * on PT_WRITABLE_MASK anymore.
 	 */
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-			memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5938,8 +5930,11 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
 					struct kvm_memory_slot *memslot)
 {
 	/*
-	 * All the TLBs can be flushed out of mmu lock, see the comments in
-	 * kvm_mmu_slot_remove_write_access().
+	 * All current use cases for flushing the TLBs for a specific memslot
+	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
+	 * The interaction between the various operations on the memslot must
+	 * be serialized by slots_lock to ensure the TLB flush from one
+	 * operation is observed by any other operation on the same memslot.
 	 */
 	lockdep_assert_held(&kvm->slots_lock);
 	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
@@ -5955,8 +5950,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
 	spin_unlock(&kvm->mmu_lock);
 
-	lockdep_assert_held(&kvm->slots_lock);
-
 	/*
 	 * It's also safe to flush TLBs out of mmu lock here as currently this
 	 * function is only used for dirty logging, in which case flushing TLB
@@ -5964,8 +5957,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 	 * dirty_bitmap.
 	 */
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-			memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 
@@ -5979,12 +5971,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 					false);
 	spin_unlock(&kvm->mmu_lock);
 
-	/* see kvm_mmu_slot_remove_write_access */
-	lockdep_assert_held(&kvm->slots_lock);
-
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-			memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 
@@ -5997,12 +5985,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
 	spin_unlock(&kvm->mmu_lock);
 
-	lockdep_assert_held(&kvm->slots_lock);
-
-	/* see kvm_mmu_slot_leaf_clear_dirty */
 	if (flush)
-		kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-			memslot->npages);
+		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
-- 
2.11.0
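
Editorial note, not part of the patch above: every converted call site now has the same shape. SPTEs are mutated under mmu_lock, the lock is dropped, and the memslot-scoped flush is issued through the common helper, with the caller-held slots_lock ordering flushes between memslot operations. The userspace C sketch below models only that locking pattern, under stated assumptions: the kvm and kvm_memory_slot structs, the function names, and the trylock-based assertion are illustrative stand-ins (the kernel uses lockdep_assert_held()), not kernel code.

/*
 * Editorial sketch: a userspace model of the locking pattern the patch
 * consolidates.  All names are illustrative stand-ins for the kernel's.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct kvm {
	pthread_mutex_t slots_lock;	/* outer lock: orders memslot ops */
	pthread_mutex_t mmu_lock;	/* inner lock: guards the "SPTE"  */
	bool spte_writable;
};

struct kvm_memory_slot {
	unsigned long base_gfn;
	unsigned long npages;
};

/* Stand-in for kvm_arch_flush_remote_tlbs_memslot(): flush one memslot. */
static void flush_remote_tlbs_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot)
{
	/*
	 * Crude substitute for lockdep_assert_held(&kvm->slots_lock):
	 * if trylock succeeds, nobody held the lock, which is a bug here.
	 */
	if (pthread_mutex_trylock(&kvm->slots_lock) == 0) {
		pthread_mutex_unlock(&kvm->slots_lock);
		fprintf(stderr, "bug: slots_lock not held\n");
		return;
	}
	printf("flushing gfns %#lx..%#lx\n", memslot->base_gfn,
	       memslot->base_gfn + memslot->npages - 1);
}

/*
 * Caller shape shared by all converted sites: mutate under mmu_lock,
 * record whether a flush is needed, then flush after dropping the lock.
 */
static void remove_write_access(struct kvm *kvm,
				struct kvm_memory_slot *memslot)
{
	bool flush;

	pthread_mutex_lock(&kvm->mmu_lock);
	flush = kvm->spte_writable;	/* writable -> read-only */
	kvm->spte_writable = false;
	pthread_mutex_unlock(&kvm->mmu_lock);

	if (flush)
		flush_remote_tlbs_memslot(kvm, memslot);
}

int main(void)
{
	struct kvm kvm = {
		.slots_lock = PTHREAD_MUTEX_INITIALIZER,
		.mmu_lock = PTHREAD_MUTEX_INITIALIZER,
		.spte_writable = true,
	};
	struct kvm_memory_slot memslot = { .base_gfn = 0x10000, .npages = 512 };

	/* Memslot ioctls take slots_lock before calling in, as here. */
	pthread_mutex_lock(&kvm.slots_lock);
	remove_write_access(&kvm, &memslot);
	pthread_mutex_unlock(&kvm.slots_lock);
	return 0;
}

Build with "cc -pthread sketch.c". The design point mirrored here is that the flush decision is made under the inner lock but the flush itself happens outside it, so only the outer slots_lock guarantees that one operation's flush is observed by the next operation on the same memslot.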