
KVM: x86/mmu: Capture requested page level before NX huge page workaround
author	Sean Christopherson <sean.j.christopherson@intel.com>
Wed, 23 Sep 2020 18:37:31 +0000 (11:37 -0700)
committer	Paolo Bonzini <pbonzini@redhat.com>
Mon, 28 Sep 2020 11:57:41 +0000 (07:57 -0400)
Apply the "huge page disallowed" adjustment of the max level only after
capturing the original requested level.  The requested level will be
used in a future patch to skip adding pages to the list of disallowed
huge pages if a huge page wasn't possible anyway, e.g. if the page
isn't mapped as a huge page in the host.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200923183735.584-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
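
For illustration only, a minimal standalone sketch of the ordering change
described above; the helper, constants, and values below are simplified
stand-ins, not the actual KVM code:

/*
 * Simplified sketch of the ordering change; PG_LEVEL_* values and the
 * host-level lookup are stand-ins for the real KVM MMU code.
 */
#include <stdio.h>

#define PG_LEVEL_4K 1
#define PG_LEVEL_2M 2
#define PG_LEVEL_1G 3

/* Pretend the host maps the gfn with a 1G huge page. */
static int host_mapping_level(void)
{
	return PG_LEVEL_1G;
}

/*
 * Old ordering: the caller clamped max_level before the adjust call,
 * so the level that was actually requested is lost whenever huge
 * pages are disallowed.
 */
static int adjust_old(int max_level, int huge_page_disallowed)
{
	int level = host_mapping_level();

	if (huge_page_disallowed)
		max_level = PG_LEVEL_4K;
	return level < max_level ? level : max_level;
}

/* New ordering: capture req_level first, then enforce the workaround. */
static int adjust_new(int max_level, int huge_page_disallowed, int *req_level)
{
	int level = host_mapping_level();

	if (level > max_level)
		level = max_level;
	*req_level = level;		/* the level we wanted to map */

	if (huge_page_disallowed)	/* iTLB multihit workaround */
		return PG_LEVEL_4K;
	return level;
}

int main(void)
{
	int req_level;
	int mapped = adjust_new(PG_LEVEL_1G, 1, &req_level);

	/*
	 * Old code: only the clamped 4K result survives.  New code: the
	 * mapping is still forced to 4K, but req_level records that a
	 * 1G page would otherwise have been possible.
	 */
	printf("old: %d\n", adjust_old(PG_LEVEL_1G, 1));
	printf("new: mapped=%d req_level=%d\n", mapped, req_level);
	return 0;
}
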
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fb15683..0f35f1a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3267,7 +3267,8 @@ static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
 }
 
 static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                  int max_level, kvm_pfn_t *pfnp)
+                                  int max_level, kvm_pfn_t *pfnp,
+                                  bool huge_page_disallowed, int *req_level)
 {
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;
@@ -3275,6 +3276,8 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
        kvm_pfn_t mask;
        int level;
 
+       *req_level = PG_LEVEL_4K;
+
        if (unlikely(max_level == PG_LEVEL_4K))
                return PG_LEVEL_4K;
 
@@ -3299,7 +3302,14 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
        if (level == PG_LEVEL_4K)
                return level;
 
-       level = min(level, max_level);
+       *req_level = level = min(level, max_level);
+
+       /*
+        * Enforce the iTLB multihit workaround after capturing the requested
+        * level, which will be used to do precise, accurate accounting.
+        */
+       if (huge_page_disallowed)
+               return PG_LEVEL_4K;
 
        /*
         * mmu_notifier_retry() was successful and mmu_lock is held, so
@@ -3345,17 +3355,15 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_shadow_walk_iterator it;
        struct kvm_mmu_page *sp;
-       int level, ret;
+       int level, req_level, ret;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        gfn_t base_gfn = gfn;
 
        if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
                return RET_PF_RETRY;
 
-       if (huge_page_disallowed)
-               max_level = PG_LEVEL_4K;
-
-       level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);
+       level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
+                                       huge_page_disallowed, &req_level);
 
        trace_kvm_mmu_spte_requested(gpa, level, pfn);
        for_each_shadow_entry(vcpu, gpa, it) {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index cbb6ed3..ba9af7c 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -636,7 +636,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned direct_access, access = gw->pt_access;
-       int top_level, hlevel, ret;
+       int top_level, hlevel, req_level, ret;
        gfn_t base_gfn = gw->gfn;
 
        direct_access = gw->pte_access;
@@ -682,10 +682,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
                        link_shadow_page(vcpu, it.sptep, sp);
        }
 
-       if (huge_page_disallowed)
-               max_level = PG_LEVEL_4K;
-
-       hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn);
+       hlevel = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
+                                        huge_page_disallowed, &req_level);
 
        trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
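
For context on the "future patch" referenced in the commit message, a
hypothetical sketch of how the captured req_level could be consumed when
accounting NX-disallowed huge pages; the exact condition and call site are
assumptions, not part of this commit:

			/*
			 * Hypothetical follow-up (not in this patch): only
			 * track a shadow page as an NX-disallowed huge page
			 * if a huge page was actually requested, i.e. if
			 * req_level says a huge mapping would have been
			 * possible at this iterator level.
			 */
			if (huge_page_disallowed && req_level >= it.level)
				account_huge_nx_page(vcpu->kvm, sp);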