KVM: x86/mmu: Don't install bogus MMIO SPTEs if MMIO caching is disabled
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 25 Feb 2021 20:47:31 +0000 (12:47 -0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 15 Mar 2021 08:43:38 +0000 (04:43 -0400)

If MMIO caching is disabled, e.g. when using shadow paging on CPUs with
52 bits of PA space, go straight to MMIO emulation and don't install an
MMIO SPTE.  The SPTE will just generate a !PRESENT #PF, i.e. can't
actually accelerate future MMIO.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210225204749.1512652-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/spte.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index df75ebb..dd946fb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2948,9 +2948,19 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                return true;
        }
 
-       if (unlikely(is_noslot_pfn(pfn)))
+       if (unlikely(is_noslot_pfn(pfn))) {
                vcpu_cache_mmio_info(vcpu, gva, gfn,
                                     access & shadow_mmio_access_mask);
+               /*
+                * If MMIO caching is disabled, emulate immediately without
+                * touching the shadow page tables as attempting to install an
+                * MMIO SPTE will just be an expensive nop.
+                */
+               if (unlikely(!shadow_mmio_value)) {
+                       *ret_val = RET_PF_EMULATE;
+                       return true;
+               }
+       }
 
        return false;
 }
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 9ea097b..dcba9c1 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -51,6 +51,8 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
        u64 mask = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;
 
+       WARN_ON_ONCE(!shadow_mmio_value);
+
        access &= shadow_mmio_access_mask;
        mask |= shadow_mmio_value | access;
        mask |= gpa | shadow_nonpresent_or_rsvd_mask;
@@ -258,7 +260,10 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;
 
-       shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+       if (mmio_value)
+               shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+       else
+               shadow_mmio_value = 0;
        shadow_mmio_access_mask = access_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
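
For illustration only (not part of the patch): a minimal user-space C sketch of the new gating, under the assumption of a hypothetical harness; only shadow_mmio_value, RET_PF_EMULATE, and the early bail-out to emulation mirror the kernel change above.

/*
 * Minimal sketch (not KVM code) of the logic this patch adds: when the MMIO
 * SPTE value is zero (MMIO caching disabled), the fault path bails out to
 * emulation instead of installing a SPTE that would stay !PRESENT and just
 * re-fault on every future access.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RET_PF_CONTINUE 0
#define RET_PF_EMULATE  1

/* Set by kvm_mmu_set_mmio_spte_mask(); zero means MMIO caching is disabled. */
static uint64_t shadow_mmio_value;

/* Hypothetical stand-in for handle_abnormal_pfn()'s no-slot (MMIO) branch. */
static bool handle_noslot_pfn(int *ret_val)
{
	/*
	 * With shadow_mmio_value == 0, an installed SPTE cannot accelerate
	 * anything, so emulate immediately and report the fault as handled.
	 */
	if (!shadow_mmio_value) {
		*ret_val = RET_PF_EMULATE;
		return true;            /* skip the SPTE install entirely */
	}
	return false;                   /* fall through and install an MMIO SPTE */
}

int main(void)
{
	int ret = RET_PF_CONTINUE;

	shadow_mmio_value = 0;          /* e.g. shadow paging with 52-bit PAs */
	if (handle_noslot_pfn(&ret))
		printf("MMIO caching off: emulate immediately (ret=%d)\n", ret);

	shadow_mmio_value = 0x3ULL << 52; /* hypothetical non-zero MMIO mask */
	if (!handle_noslot_pfn(&ret))
		printf("MMIO caching on: install an MMIO SPTE as before\n");

	return 0;
}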