
KVM: MMU: drop kvm_mmu_zap_mmio_sptes
author Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Mon, 10 Jun 2013 08:28:55 +0000 (16:28 +0800)
committer Gleb Natapov <gleb@redhat.com>
Thu, 27 Jun 2013 11:20:40 +0000 (14:20 +0300)
Drop kvm_mmu_zap_mmio_sptes and use kvm_mmu_invalidate_zap_all_pages
instead to handle mmio generation number overflow

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
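
For context, here is a minimal standalone sketch of the MMIO-generation scheme that the overflow check in the diff below guards. The bit layout, constants and helper names are illustrative assumptions modelled on the patch, not the kernel's actual definitions:

/*
 * Sketch only: a memslot-generation counter is stamped into every MMIO SPTE;
 * an SPTE whose stamped generation no longer matches is treated as stale.
 * When the counter nears its maximum it must wrap, and any page still holding
 * an old-generation MMIO SPTE has to be zapped -- after this commit, via the
 * same kvm_mmu_invalidate_zap_all_pages() path used for memslot changes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MMIO_GEN_BITS   19                      /* assumed width of the generation field */
#define MMIO_MAX_GEN    ((1u << MMIO_GEN_BITS) - 1)
#define MMIO_GEN_SHIFT  45                      /* assumed position of the field in the SPTE */

static unsigned int mmio_generation;            /* bumped whenever memslots change */

/* Encode the current generation and the guest frame number into a fake SPTE. */
static uint64_t make_mmio_spte(uint64_t gfn)
{
        return ((uint64_t)mmio_generation << MMIO_GEN_SHIFT) | (gfn << 12);
}

/* A cached MMIO SPTE is only honoured while its stamped generation matches. */
static bool mmio_spte_is_stale(uint64_t spte)
{
        return ((spte >> MMIO_GEN_SHIFT) & MMIO_MAX_GEN) != mmio_generation;
}

static void memslots_changed(void)
{
        if (++mmio_generation >= MMIO_MAX_GEN - 1) {
                /* The counter is about to wrap: old stamps could collide with
                 * reused values, so zap every shadow page and start over. */
                printf("generation wrap: zap all shadow pages\n");
                mmio_generation = 0;
        }
}

int main(void)
{
        uint64_t spte = make_mmio_spte(0x1234);

        memslots_changed();
        printf("stale after memslot change: %d\n", mmio_spte_is_stale(spte));
        return 0;
}

With the full zap-all fallback on wrap, the per-page mmio_cached flag and the dedicated kvm_mmu_zap_mmio_sptes() walk become unnecessary, which is what the diff below removes.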

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 90d05ed..966f265 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -230,7 +230,6 @@ struct kvm_mmu_page {
 #endif
 
        int write_flooding_count;
-       bool mmio_cached;
 };
 
 struct kvm_pio_request {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c212101..7113a0f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -246,13 +246,11 @@ static unsigned int kvm_current_mmio_generation(struct kvm *kvm)
 static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn,
                           unsigned access)
 {
-       struct kvm_mmu_page *sp =  page_header(__pa(sptep));
        unsigned int gen = kvm_current_mmio_generation(kvm);
        u64 mask = generation_mmio_spte_mask(gen);
 
        access &= ACC_WRITE_MASK | ACC_USER_MASK;
        mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
-       sp->mmio_cached = true;
 
        trace_mark_mmio_spte(sptep, gfn, access, gen);
        mmu_spte_set(sptep, mask);
@@ -4364,24 +4362,6 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
        spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
-{
-       struct kvm_mmu_page *sp, *node;
-       LIST_HEAD(invalid_list);
-
-       spin_lock(&kvm->mmu_lock);
-restart:
-       list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
-               if (!sp->mmio_cached)
-                       continue;
-               if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
-                       goto restart;
-       }
-
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
-       spin_unlock(&kvm->mmu_lock);
-}
-
 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
 {
        return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
@@ -4397,7 +4377,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
         * when mark memslot invalid.
         */
        if (unlikely(kvm_current_mmio_generation(kvm) >= (MMIO_MAX_GEN - 1)))
-               kvm_mmu_zap_mmio_sptes(kvm);
+               kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)