
KVM: arm64: Try PMD block mappings if PUD mappings are not supported
author     Alexandru Elisei <alexandru.elisei@arm.com>
           Thu, 10 Sep 2020 13:33:51 +0000 (14:33 +0100)
committer  Marc Zyngier <maz@kernel.org>
           Fri, 18 Sep 2020 15:10:15 +0000 (16:10 +0100)

When userspace uses hugetlbfs for the VM memory, user_mem_abort() tries to
use the same block size to map the faulting IPA in stage 2. If stage 2
cannot use the same block mapping because the block size doesn't fit in the
memslot or the memslot is not properly aligned, user_mem_abort() will fall
back to a page mapping, regardless of the block size. We can do better for
PUD backed hugetlbfs by checking if a PMD block mapping is supported before
deciding to use a page.

vma_pagesize is an unsigned long, so use 1UL instead of 1ULL when assigning
its value.

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200910133351.118191-1-alexandru.elisei@arm.com
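
For readers who want to see the resulting fallback order in isolation, the
following is a small standalone sketch of the logic this patch introduces.
It is only an illustration: pick_vma_pagesize() and supports_huge_mapping()
are made-up names standing in for the relevant parts of user_mem_abort()
and fault_supports_stage2_huge_mapping(), the shift values assume a 4K-page
arm64 configuration, and the stand-in check only tests hva/IPA alignment,
not the memslot boundaries the real helper also verifies.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PMD_SHIFT  21UL
#define PUD_SHIFT  30UL

/* Stand-in for fault_supports_stage2_huge_mapping(): only checks that
 * hva and the faulting IPA are congruent modulo the block size. */
static bool supports_huge_mapping(unsigned long map_size,
                                  unsigned long hva, unsigned long ipa)
{
        return ((hva ^ ipa) & (map_size - 1)) == 0;
}

/* Mirrors the fallback order added by the patch: PUD -> PMD -> page. */
static unsigned long pick_vma_pagesize(unsigned long vma_shift,
                                       unsigned long hva, unsigned long ipa,
                                       bool logging_active, bool pfnmap)
{
        if (logging_active || pfnmap)
                vma_shift = PAGE_SHIFT;

        if (vma_shift == PUD_SHIFT &&
            !supports_huge_mapping(1UL << PUD_SHIFT, hva, ipa))
                vma_shift = PMD_SHIFT;

        if (vma_shift == PMD_SHIFT &&
            !supports_huge_mapping(1UL << PMD_SHIFT, hva, ipa))
                vma_shift = PAGE_SHIFT;

        return 1UL << vma_shift;        /* vma_pagesize, an unsigned long */
}

int main(void)
{
        /* 1G hugetlbfs backing, but hva and IPA only agree on 2M alignment:
         * before this patch the mapping dropped to 4K, now it picks 2M. */
        printf("%lu\n", pick_vma_pagesize(PUD_SHIFT, 0x40000000UL,
                                          0x80200000UL, false, false));
        return 0;
}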
arch/arm64/kvm/mmu.c

index 21b70ab..852497b 100644
@@ -779,16 +779,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        else
                vma_shift = PAGE_SHIFT;
 
-       vma_pagesize = 1ULL << vma_shift;
        if (logging_active ||
-           (vma->vm_flags & VM_PFNMAP) ||
-           !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+           (vma->vm_flags & VM_PFNMAP)) {
                force_pte = true;
-               vma_pagesize = PAGE_SIZE;
+               vma_shift = PAGE_SHIFT;
+       }
+
+       if (vma_shift == PUD_SHIFT &&
+           !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+               vma_shift = PMD_SHIFT;
+
+       if (vma_shift == PMD_SHIFT &&
+           !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
+               force_pte = true;
+               vma_shift = PAGE_SHIFT;
        }
 
+       vma_pagesize = 1UL << vma_shift;
        if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
-               fault_ipa &= huge_page_mask(hstate_vma(vma));
+               fault_ipa &= ~(vma_pagesize - 1);
 
        gfn = fault_ipa >> PAGE_SHIFT;
        mmap_read_unlock(current->mm);