
KVM: MMU: pass kvm_mmu_page struct to make_spte
author Paolo Bonzini <pbonzini@redhat.com>
Tue, 17 Aug 2021 11:43:19 +0000 (07:43 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 1 Oct 2021 07:44:55 +0000 (03:44 -0400)
The level and A/D bit support of the new SPTE can be found in the role,
which is stored in the kvm_mmu_page struct.  Passing the struct therefore
merges the level and ad_disabled arguments of make_spte into a single
sp argument.
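
For context, a simplified sketch of the fields involved, abridged from
arch/x86/include/asm/kvm_host.h (the real union kvm_mmu_page_role
carries many more bits than shown here):

union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;	/* level of the page table this page backs */
		/* ... */
		unsigned ad_disabled:1;	/* A/D bits disabled for this page's SPTEs */
		/* ... */
	};
};

struct kvm_mmu_page {
	/* ... */
	union kvm_mmu_page_role role;
	u64 *spt;	/* the actual page table */
	/* ... */
};

With this, make_spte can derive both values from its new sp argument:
the level from sp->role.level and A/D support from sp->role.ad_disabled.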

For the TDP MMU, the kvm_mmu_page was not used (kvm_tdp_mmu_map does
not use it if the SPTE is already present), so we fetch it just before
calling make_spte.
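
A minimal sketch of how that fetch works, modeled on sptep_to_sp() and
to_shadow_page() in arch/x86/kvm/mmu/mmu_internal.h (details may vary
between kernel versions):

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	/* the kvm_mmu_page was stashed in page_private() at allocation */
	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	/* all SPTEs of a shadow page live in one physical page */
	return to_shadow_page(__pa(sptep));
}

Since the SPTE pointer already identifies the page table page, fetching
the kvm_mmu_page this way costs only a page_private() lookup at fault
time.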

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/spte.c
arch/x86/kvm/mmu/spte.h
arch/x86/kvm/mmu/tdp_mmu.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 9130300..c208f00 100644
@@ -2716,8 +2716,8 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        was_rmapped = 1;
        }
 
-       wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
-                          true, host_writable, sp_ad_disabled(sp), &spte);
+       wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative,
+                          true, host_writable, &spte);
 
        if (*sptep == spte) {
                ret = RET_PF_SPURIOUS;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 7f2c6ee..fbbaa3f 100644
@@ -1128,9 +1128,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                sptep = &sp->spt[i];
                spte = *sptep;
                host_writable = spte & shadow_host_writable_mask;
-               make_spte(vcpu, pte_access, PG_LEVEL_4K, gfn,
+               make_spte(vcpu, sp, pte_access, gfn,
                          spte_to_pfn(spte), spte, true, false,
-                         host_writable, sp_ad_disabled(sp), &spte);
+                         host_writable, &spte);
 
                flush |= mmu_spte_update(sptep, spte);
        }
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 29ea996..2c5c14f 100644
@@ -89,15 +89,16 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
                                     E820_TYPE_RAM);
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
-                    gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
-                    bool can_unsync, bool host_writable, bool ad_disabled,
-                    u64 *new_spte)
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+              unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+              u64 old_spte, bool speculative, bool can_unsync,
+              bool host_writable, u64 *new_spte)
 {
+       int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;
 
-       if (ad_disabled)
+       if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED_MASK;
        else if (kvm_vcpu_ad_need_write_protect(vcpu))
                spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1998ec5..cbb02a9 100644
@@ -334,10 +334,10 @@ static inline u64 get_mmio_spte_generation(u64 spte)
        return gen;
 }
 
-bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
-                    gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
-                    bool can_unsync, bool host_writable, bool ad_disabled,
-                    u64 *new_spte);
+bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+              unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
+              u64 old_spte, bool speculative, bool can_unsync,
+              bool host_writable, u64 *new_spte);
 u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
 u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
 u64 mark_spte_for_access_track(u64 spte);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 1cdb561..6dbf289 100644
@@ -897,17 +897,18 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
                                          struct kvm_page_fault *fault,
                                          struct tdp_iter *iter)
 {
+       struct kvm_mmu_page *sp = sptep_to_sp(iter->sptep);
        u64 new_spte;
        int ret = RET_PF_FIXED;
        bool wrprot = false;
 
+       WARN_ON(sp->role.level != fault->goal_level);
        if (unlikely(!fault->slot))
                new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
        else
-               wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
+               wrprot = make_spte(vcpu, sp, ACC_ALL, iter->gfn,
                                         fault->pfn, iter->old_spte, fault->prefault, true,
-                                        fault->map_writable, !shadow_accessed_mask,
-                                        &new_spte);
+                                        fault->map_writable, &new_spte);
 
        if (new_spte == iter->old_spte)
                ret = RET_PF_SPURIOUS;