
KVM: x86/mmu: Rename __direct_map() to direct_map()
author     David Matlack <dmatlack@google.com>
           Wed, 21 Sep 2022 17:35:46 +0000 (10:35 -0700)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 27 Dec 2022 11:03:01 +0000 (06:03 -0500)
Rename __direct_map() to direct_map() since the leading underscores are
unnecessary. This also makes the page fault handler names more
consistent: kvm_tdp_mmu_page_fault() calls kvm_tdp_mmu_map() and
direct_page_fault() calls direct_map().
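
For illustration, the resulting pairing looks like this (skeletal stubs only;
the real bodies live in arch/x86/kvm/mmu/mmu.c and arch/x86/kvm/mmu/tdp_mmu.c,
and the struct definitions below are empty stand-ins, not the KVM types):

    /* Empty stand-ins so the sketch is self-contained; not the KVM types. */
    struct kvm_vcpu { int unused; };
    struct kvm_page_fault { int unused; };

    static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    {
            return 0; /* body elided */
    }

    static int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    {
            return 0; /* body elided */
    }

    /* Shadow-MMU path: direct_page_fault() -> direct_map() */
    static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    {
            return direct_map(vcpu, fault);
    }

    /* TDP-MMU path: kvm_tdp_mmu_page_fault() -> kvm_tdp_mmu_map() */
    static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
    {
            return kvm_tdp_mmu_map(vcpu, fault);
    }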

Opportunistically make some trivial cleanups to comments that had to be
modified anyway since they mentioned __direct_map(). Specifically, use
"()" when referring to functions, and include kvm_tdp_mmu_map() among
the various callers of disallowed_hugepage_adjust().

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-11-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h

arch/x86/kvm/mmu/mmu.c
index dfac473..7fb7a07 100644
@@ -3131,11 +3131,11 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
            !is_large_pte(spte) &&
            spte_to_child_sp(spte)->nx_huge_page_disallowed) {
                /*
-                * A small SPTE exists for this pfn, but FNAME(fetch)
-                * and __direct_map would like to create a large PTE
-                * instead: just force them to go down another level,
-                * patching back for them into pfn the next 9 bits of
-                * the address.
+                * A small SPTE exists for this pfn, but FNAME(fetch),
+                * direct_map(), or kvm_tdp_mmu_map() would like to create a
+                * large PTE instead: just force them to go down another level,
+                * patching back for them into pfn the next 9 bits of the
+                * address.
                 */
                u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
                                KVM_PAGES_PER_HPAGE(cur_level - 1);
@@ -3144,7 +3144,7 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
        }
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        struct kvm_shadow_walk_iterator it;
        struct kvm_mmu_page *sp;
@@ -4330,7 +4330,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
        if (r)
                goto out_unlock;
 
-       r = __direct_map(vcpu, fault);
+       r = direct_map(vcpu, fault);
 
 out_unlock:
        write_unlock(&vcpu->kvm->mmu_lock);
arch/x86/kvm/mmu/mmu_internal.h
index 0698907..ac00bfb 100644
@@ -199,7 +199,7 @@ struct kvm_page_fault {
 
        /*
         * Maximum page size that can be created for this fault; input to
-        * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
+        * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
         */
        u8 max_level;
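
The comment added to disallowed_hugepage_adjust() above talks about patching
back into pfn "the next 9 bits of the address". A standalone sketch of that
arithmetic follows; PAGES_PER_HPAGE here is a local stand-in for the kernel's
KVM_PAGES_PER_HPAGE(), which with 4KiB base pages evaluates to
1 << ((level - 1) * 9):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

    int main(void)
    {
            int cur_level = 3;      /* fault wanted a 1GiB (level 3) mapping */
            uint64_t gfn = 0x12345; /* arbitrary example guest frame number */

            /*
             * Subtracting the page counts of adjacent levels leaves set
             * exactly the nine bits that pick a 2MiB chunk within the
             * 1GiB region: bits 9..17 of the gfn.
             */
            uint64_t page_mask = PAGES_PER_HPAGE(cur_level) -
                                 PAGES_PER_HPAGE(cur_level - 1);

            printf("page_mask       = %#llx\n",
                   (unsigned long long)page_mask);          /* 0x3fe00 */
            printf("bits patched in = %#llx\n",
                   (unsigned long long)(gfn & page_mask));  /* 0x12200 */
            return 0;
    }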