
KVM: PPC: Book3S HV: Rename kvm_alloc_hpt() for clarity
author    David Gibson <david@gibson.dropbear.id.au>
          Tue, 20 Dec 2016 05:48:59 +0000 (16:48 +1100)
committer Paul Mackerras <paulus@ozlabs.org>
          Tue, 31 Jan 2017 10:59:23 +0000 (21:59 +1100)

The difference between kvm_alloc_hpt() and kvmppc_alloc_hpt() is not at
all obvious from the names.  In practice kvmppc_alloc_hpt() allocates an
HPT by whatever means are available, calling kvm_alloc_hpt(), which
attempts the allocation from CMA only.

To make this less confusing, rename kvm_alloc_hpt() to kvm_alloc_hpt_cma().
Similarly, kvm_release_hpt() is renamed kvm_free_hpt_cma().

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
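
A minimal sketch of how the renamed CMA helper fits into kvmppc_alloc_hpt()
after this patch: the CMA pool is tried first, and the buddy allocator is the
fallback.  This is simplified and assumed, not the kernel code verbatim; the
helper names hpt_alloc_sketch/hpt_free_sketch and the trimmed error handling
are illustrative only.

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <asm/kvm_ppc.h>

    /* Try the CMA-backed allocator first; fall back to ordinary pages. */
    static unsigned long hpt_alloc_sketch(unsigned int order, int *cma_alloc)
    {
            struct page *page;
            unsigned long hpt;

            *cma_alloc = 0;
            page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
            if (page) {
                    hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
                    memset((void *)hpt, 0, 1ul << order);
                    *cma_alloc = 1;
            } else {
                    /* Buddy-allocator fallback, zeroed like the CMA path. */
                    hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                           order - PAGE_SHIFT);
            }
            return hpt;
    }

    /* The release path must match the allocation path, as in kvmppc_free_hpt(). */
    static void hpt_free_sketch(unsigned long hpt, unsigned int order, int cma_alloc)
    {
            if (cma_alloc)
                    kvm_free_hpt_cma(virt_to_page((void *)hpt),
                                     1ul << (order - PAGE_SHIFT));
            else
                    free_pages(hpt, order - PAGE_SHIFT);
    }
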
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_builtin.c

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 48c760f..6fad1f1 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -186,8 +186,8 @@ extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long tce_value, unsigned long npages);
 extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
-extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
-extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
+extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
+extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern void kvmppc_core_free_memslot(struct kvm *kvm,

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 9df3d94..16f2784 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -62,7 +62,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
        }
 
        kvm->arch.hpt_cma_alloc = 0;
-       page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
+       page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
        if (page) {
                hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
                memset((void *)hpt, 0, (1ul << order));
@@ -108,7 +108,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 
  out_freehpt:
        if (kvm->arch.hpt_cma_alloc)
-               kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
+               kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
        else
                free_pages(hpt, order - PAGE_SHIFT);
        return -ENOMEM;
@@ -157,8 +157,8 @@ void kvmppc_free_hpt(struct kvm *kvm)
 {
        vfree(kvm->arch.revmap);
        if (kvm->arch.hpt_cma_alloc)
-               kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
-                               1 << (kvm->arch.hpt_order - PAGE_SHIFT));
+               kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt_virt),
+                                1 << (kvm->arch.hpt_order - PAGE_SHIFT));
        else if (kvm->arch.hpt_virt)
                free_pages(kvm->arch.hpt_virt,
                           kvm->arch.hpt_order - PAGE_SHIFT);

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fe08fea..96e7e60 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -57,19 +57,19 @@ static int __init early_parse_kvm_cma_resv(char *p)
 }
 early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
 
-struct page *kvm_alloc_hpt(unsigned long nr_pages)
+struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
 {
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
 }
-EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
+EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
 
-void kvm_release_hpt(struct page *page, unsigned long nr_pages)
+void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
 {
        cma_release(kvm_cma, page, nr_pages);
 }
-EXPORT_SYMBOL_GPL(kvm_release_hpt);
+EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
 
 /**
  * kvm_cma_reserve() - reserve area for kvm hash pagetable