
KVM: x86: Drop skip MMU sync and TLB flush params from "new PGD" helpers
author    Sean Christopherson <seanjc@google.com>
          Wed, 9 Jun 2021 23:42:27 +0000 (16:42 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 17 Jun 2021 17:09:52 +0000 (13:09 -0400)
Drop skip_mmu_sync and skip_tlb_flush from __kvm_mmu_new_pgd() now that
all call sites unconditionally skip both the sync and flush.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609234235.1244004-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c
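
Before the per-file diffs, here is a minimal, self-contained C sketch of the pattern this patch applies. It is not kernel code: struct vcpu_stub, make_request() and the REQ_* constants are hypothetical stand-ins for KVM's vCPU request machinery, and force_flush_and_sync_on_reuse models the module parameter of the same name. The point is that once every caller passes skip_tlb_flush = skip_mmu_sync = true, both conditions reduce to the force_flush_and_sync_on_reuse check, so the parameters can be dropped without changing behavior.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-ins for the KVM request machinery; these names are hypothetical and
 * exist only so the sketch compiles on its own.
 */
struct vcpu_stub { unsigned long requests; };
#define REQ_MMU_SYNC            (1UL << 0)
#define REQ_TLB_FLUSH_CURRENT   (1UL << 1)

static bool force_flush_and_sync_on_reuse;  /* models the module param of the same name */

static void make_request(struct vcpu_stub *vcpu, unsigned long req)
{
	vcpu->requests |= req;
}

/* Before: both skips are parameters, but every remaining caller passes true/true. */
static void new_pgd_old(struct vcpu_stub *vcpu, bool skip_tlb_flush, bool skip_mmu_sync)
{
	if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
		make_request(vcpu, REQ_MMU_SYNC);
	if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
		make_request(vcpu, REQ_TLB_FLUSH_CURRENT);
}

/* After: the constant-true parameters are dropped and both conditions fold. */
static void new_pgd_new(struct vcpu_stub *vcpu)
{
	if (force_flush_and_sync_on_reuse) {
		make_request(vcpu, REQ_MMU_SYNC);
		make_request(vcpu, REQ_TLB_FLUSH_CURRENT);
	}
}

int main(void)
{
	struct vcpu_stub a = { 0 }, b = { 0 };

	new_pgd_old(&a, true, true);   /* what every call site did before the patch */
	new_pgd_new(&b);               /* what the same call site does after it */

	printf("old requests: %#lx, new requests: %#lx\n", a.requests, b.requests);
	return 0;
}

Running the sketch with either value of force_flush_and_sync_on_reuse leaves the two request masks identical, which is the "no functional change intended" claim above.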

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f44a979..d866bfe 100644
@@ -1708,8 +1708,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gva_t gva, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
-                    bool skip_mmu_sync);
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
 
 void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
                       int tdp_huge_page_level);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 64d7342..894b9a4 100644
@@ -3949,8 +3949,7 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
 }
 
 static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
-                             union kvm_mmu_page_role new_role,
-                             bool skip_tlb_flush, bool skip_mmu_sync)
+                             union kvm_mmu_page_role new_role)
 {
        if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
                kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -3965,10 +3964,10 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
         */
        kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 
-       if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
+       if (force_flush_and_sync_on_reuse) {
                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-       if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+       }
 
        /*
         * The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -3987,11 +3986,9 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
                                to_shadow_page(vcpu->arch.mmu->root_hpa));
 }
 
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
-                    bool skip_mmu_sync)
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 {
-       __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
-                         skip_tlb_flush, skip_mmu_sync);
+       __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
 
@@ -4684,7 +4681,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
        union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
 
-       __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, true, true);
+       __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
 
        if (new_role.as_u64 != context->mmu_role.as_u64) {
                shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
@@ -4736,7 +4733,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
                                                   execonly, level);
 
-       __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
+       __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
 
        if (new_role.as_u64 == context->mmu_role.as_u64)
                return;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 20e6722..5f45991 100644
@@ -414,7 +414,7 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                return -EINVAL;
 
        if (!nested_npt)
-               kvm_mmu_new_pgd(vcpu, cr3, true, true);
+               kvm_mmu_new_pgd(vcpu, cr3);
 
        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 7210e7c..d07b83b 100644
@@ -1129,12 +1129,8 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                return -EINVAL;
        }
 
-       /*
-        * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
-        * flushes are handled by nested_vmx_transition_tlb_flush().
-        */
        if (!nested_ept) {
-               kvm_mmu_new_pgd(vcpu, cr3, true, true);
+               kvm_mmu_new_pgd(vcpu, cr3);
 
                /*
                 * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7d2c7a3..1a0fb0f 100644
@@ -1115,7 +1115,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                return 1;
 
        if (cr3 != kvm_read_cr3(vcpu))
-               kvm_mmu_new_pgd(vcpu, cr3, true, true);
+               kvm_mmu_new_pgd(vcpu, cr3);
 
        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);