KVM: selftests: Unconditionally allocate EPT tables in memslot 0
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Tue, 22 Jun 2021 20:05:23 +0000 (13:05 -0700)
Committer:  Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Thu, 24 Jun 2021 15:47:48 +0000 (11:47 -0400)
Drop the EPTP memslot param from all EPT helpers and shove the hardcoded
'0' down to the vm_phy_page_alloc() calls.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
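---
For illustration only (not part of the patch): a caller-side before/after,
taken from the dirty-log test changes below; the surrounding test setup is
assumed.

    /* Before: callers passed the EPT memslot (always 0) explicitly. */
    prepare_eptp(vmx, vm, 0);
    nested_map_memslot(vmx, vm, 0, 0);
    nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);

    /*
     * After: the trailing memslot argument is gone; the helpers pass the
     * hardcoded 0 to vm_phy_page_alloc() themselves.  Note that
     * prepare_eptp() keeps its eptp_memslot parameter.
     */
    prepare_eptp(vmx, vm, 0);
    nested_map_memslot(vmx, vm, 0);
    nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
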
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c

diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 516c81d..583ceb0 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -608,15 +608,13 @@ bool nested_vmx_supported(void);
 void nested_vmx_check_supported(void);
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                  uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
+                  uint64_t nested_paddr, uint64_t paddr);
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                uint64_t nested_paddr, uint64_t paddr, uint64_t size,
-                uint32_t eptp_memslot);
+                uint64_t nested_paddr, uint64_t paddr, uint64_t size);
 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
-                       uint32_t memslot, uint32_t eptp_memslot);
+                       uint32_t memslot);
 void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot);
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
-                                     uint32_t eptp_memslot);
+void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
 
 #endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d568d8c..1d26c39 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -393,7 +393,7 @@ void nested_vmx_check_supported(void)
 }
 
 void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-                  uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
+                  uint64_t nested_paddr, uint64_t paddr)
 {
        uint16_t index[4];
        struct eptPageTableEntry *pml4e;
@@ -427,7 +427,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
        pml4e = vmx->eptp_hva;
        if (!pml4e[index[3]].readable) {
                pml4e[index[3]].address = vm_phy_page_alloc(vm,
-                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
                        >> vm->page_shift;
                pml4e[index[3]].writable = true;
                pml4e[index[3]].readable = true;
@@ -439,7 +439,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].readable) {
                pdpe[index[2]].address = vm_phy_page_alloc(vm,
-                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
                        >> vm->page_shift;
                pdpe[index[2]].writable = true;
                pdpe[index[2]].readable = true;
@@ -451,7 +451,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].readable) {
                pde[index[1]].address = vm_phy_page_alloc(vm,
-                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, 0)
                        >> vm->page_shift;
                pde[index[1]].writable = true;
                pde[index[1]].readable = true;
@@ -492,8 +492,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * page range starting at nested_paddr to the page range starting at paddr.
  */
 void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
-               uint64_t nested_paddr, uint64_t paddr, uint64_t size,
-               uint32_t eptp_memslot)
+               uint64_t nested_paddr, uint64_t paddr, uint64_t size)
 {
        size_t page_size = vm->page_size;
        size_t npages = size / page_size;
@@ -502,7 +501,7 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
 
        while (npages--) {
-               nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
+               nested_pg_map(vmx, vm, nested_paddr, paddr);
                nested_paddr += page_size;
                paddr += page_size;
        }
@@ -512,7 +511,7 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
  * physical pages in VM.
  */
 void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
-                       uint32_t memslot, uint32_t eptp_memslot)
+                       uint32_t memslot)
 {
        sparsebit_idx_t i, last;
        struct userspace_mem_region *region =
@@ -528,8 +527,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                nested_map(vmx, vm,
                           (uint64_t)i << vm->page_shift,
                           (uint64_t)i << vm->page_shift,
-                          1 << vm->page_shift,
-                          eptp_memslot);
+                          1 << vm->page_shift);
        }
 }
 
@@ -541,8 +539,7 @@ void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
        vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
 }
 
-void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm,
-                                     uint32_t eptp_memslot)
+void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
 {
        vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
        vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
index d14888b..d438c4d 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -96,7 +96,7 @@ int main(int argc, char *argv[])
        }
 
        vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
-       prepare_virtualize_apic_accesses(vmx, vm, 0);
+       prepare_virtualize_apic_accesses(vmx, vm);
        vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);
 
        while (!done) {
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 18f6361..06a6498 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -107,9 +107,9 @@ int main(int argc, char *argv[])
         * meaning after the last call to virt_map.
         */
        prepare_eptp(vmx, vm, 0);
-       nested_map_memslot(vmx, vm, 0, 0);
-       nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
-       nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
+       nested_map_memslot(vmx, vm, 0);
+       nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
+       nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
 
        bmap = bitmap_alloc(TEST_MEM_PAGES);
        host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);