
Revert "KVM: MMU: collapse TLB flushes when zap all pages"
author     Sean Christopherson <sean.j.christopherson@intel.com>
           Tue, 5 Feb 2019 21:01:27 +0000 (13:01 -0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 20 Feb 2019 21:48:42 +0000 (22:48 +0100)
Unwinding optimizations related to obsolete pages is a step towards
removing x86 KVM's fast invalidate mechanism, i.e. this is one part of
reverting all patches from the series that introduced the mechanism[1].

This reverts commit f34d251d66ba263c077ed9d2bbd1874339a4c887.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 255b021..e733262 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2211,14 +2211,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);
 
-/*
- * NOTE: we should pay more attention on the zapped-obsolete page
- * (is_obsolete_sp(sp) && sp->role.invalid) when you do hash list walk
- * since it has been deleted from active_mmu_pages but still can be found
- * at hast list.
- *
- * for_each_valid_sp() has skipped that kind of pages.
- */
 #define for_each_valid_sp(_kvm, _sp, _gfn)                             \
        hlist_for_each_entry(_sp,                                       \
          &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -5881,13 +5873,11 @@ restart:
                if (sp->role.invalid)
                        continue;
 
-               /*
-                * Need not flush tlb since we only zap the sp with invalid
-                * generation number.
-                */
                if (batch >= BATCH_ZAP_PAGES &&
-                     cond_resched_lock(&kvm->mmu_lock)) {
+                     (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
                        batch = 0;
+                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       cond_resched_lock(&kvm->mmu_lock);
                        goto restart;
                }
 
@@ -5898,10 +5888,6 @@ restart:
                        goto restart;
        }
 
-       /*
-        * Should flush tlb before free page tables since lockless-walking
-        * may use the pages.
-        */
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
@@ -5920,17 +5906,6 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
        trace_kvm_mmu_invalidate_zap_all_pages(kvm);
        kvm->arch.mmu_valid_gen++;
 
-       /*
-        * Notify all vcpus to reload its shadow page table
-        * and flush TLB. Then all vcpus will switch to new
-        * shadow page table with the new mmu_valid_gen.
-        *
-        * Note: we should do this under the protection of
-        * mmu-lock, otherwise, vcpu would purge shadow page
-        * but miss tlb flush.
-        */
-       kvm_reload_remote_mmus(kvm);
-
        kvm_zap_obsolete_pages(kvm);
        spin_unlock(&kvm->mmu_lock);
 }
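
For reference, below is a rough sketch of what the two affected paths look like once this revert is applied, reconstructed from the context and "+" lines above. The loop scaffolding (the list walk, the is_obsolete_sp() check, the spin_lock() at function entry, and the local declarations) is an assumption based on the surrounding KVM MMU code of that era, not a verbatim copy of the upstream functions.

/*
 * Rough reconstruction of the zap path after this revert; only the
 * batch/commit logic and kvm_reload_remote_mmus() call are taken
 * directly from the hunks above, the rest is assumed scaffolding.
 */
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		int ret;

		/* Pages created after the generation bump are not obsolete. */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/* Invalid pages are already queued for release. */
		if (sp->role.invalid)
			continue;

		/*
		 * Restored behavior: commit the pending zaps (flush TLBs and
		 * free the pages) before dropping mmu_lock, rather than
		 * carrying them across the reschedule.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
			batch = 0;
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
			cond_resched_lock(&kvm->mmu_lock);
			goto restart;
		}

		ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
		batch += ret;
		if (ret)
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
{
	spin_lock(&kvm->mmu_lock);
	trace_kvm_mmu_invalidate_zap_all_pages(kvm);
	kvm->arch.mmu_valid_gen++;

	/*
	 * Restored by this revert: notify all vCPUs to reload their shadow
	 * page tables and flush TLBs so they switch to the new
	 * mmu_valid_gen, done under mmu_lock so no vCPU can observe the
	 * generation bump without the accompanying flush.
	 */
	kvm_reload_remote_mmus(kvm);

	kvm_zap_obsolete_pages(kvm);
	spin_unlock(&kvm->mmu_lock);
}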