KVM: VMX: Track common EPTP for Hyper-V's paravirt TLB flush
Author:     Sean Christopherson <sean.j.christopherson@intel.com>
AuthorDate: Fri, 5 Mar 2021 18:31:14 +0000 (10:31 -0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 15 Mar 2021 08:43:57 +0000 (04:43 -0400)

Explicitly track the EPTP that is common to all vCPUs instead of
grabbing vCPU0's EPTP when invoking Hyper-V's paravirt TLB flush.
Tracking the EPTP will allow optimizing the checks when loading a new
EPTP and will also allow dropping ept_pointer_match, e.g. by marking
the common EPTP as invalid.
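
A minimal sketch of the follow-on optimization alluded to above, assuming a
hypothetical hv_track_new_eptp() hook invoked when a vCPU loads a new EPTP
(the helper name and call site are assumptions, not part of this patch):

static void hv_track_new_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);

	spin_lock(&kvm_vmx->ept_pointer_lock);

	to_vmx(vcpu)->ept_pointer = new_eptp;

	/*
	 * If the new EPTP diverges from the cached common EPTP, the vCPUs
	 * no longer share a single EPTP; invalidate the cache so the flush
	 * path falls back to flushing each vCPU's EPTP individually.
	 */
	if (new_eptp != kvm_vmx->hv_tlb_eptp)
		kvm_vmx->hv_tlb_eptp = INVALID_PAGE;

	spin_unlock(&kvm_vmx->ept_pointer_lock);
}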

This also technically fixes a bug where KVM could theoretically flush an
invalid GPA if all vCPUs have an invalid root.  In practice, it's likely
impossible to trigger a remote TLB flush in such a scenario.  In any
case, the superfluous flush is completely benign.
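
As an illustration of the fix (the wrapper below is hypothetical, not code
from this patch), the matched-EPTP branch now reduces to:

static int hv_flush_common_eptp(struct kvm *kvm, struct kvm_tlb_range *range)
{
	hpa_t eptp = to_kvm_vmx(kvm)->hv_tlb_eptp;

	/*
	 * If every vCPU's root is invalid, no common EPTP was recorded and
	 * there is nothing to flush; previously vCPU0's (possibly invalid)
	 * EPTP would have been passed to the hypercall regardless.
	 */
	if (!VALID_PAGE(eptp))
		return 0;

	return hv_remote_flush_eptp(eptp, range);
}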

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210305183123.3978098-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0c36cb6..8ba25b1 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -483,12 +483,14 @@ static void check_ept_pointer_match(struct kvm *kvm)
                if (!VALID_PAGE(tmp_eptp)) {
                        tmp_eptp = to_vmx(vcpu)->ept_pointer;
                } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
+                       to_kvm_vmx(kvm)->hv_tlb_eptp = INVALID_PAGE;
                        to_kvm_vmx(kvm)->ept_pointers_match
                                = EPT_POINTERS_MISMATCH;
                        return;
                }
        }
 
+       to_kvm_vmx(kvm)->hv_tlb_eptp = tmp_eptp;
        to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
 }
 
@@ -501,21 +503,18 @@ static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush
                        range->pages);
 }
 
-static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
-               struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+static inline int hv_remote_flush_eptp(u64 eptp, struct kvm_tlb_range *range)
 {
-       u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
-
        /*
         * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
         * of the base of EPT PML4 table, strip off EPT configuration
         * information.
         */
        if (range)
-               return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
+               return hyperv_flush_guest_mapping_range(eptp & PAGE_MASK,
                                kvm_fill_hv_flush_list_func, (void *)range);
        else
-               return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
+               return hyperv_flush_guest_mapping(eptp & PAGE_MASK);
 }
 
 static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
@@ -533,12 +532,11 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        /* If ept_pointer is invalid pointer, bypass flush request. */
                        if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
-                               ret |= __hv_remote_flush_tlb_with_range(
-                                       kvm, vcpu, range);
+                               ret |= hv_remote_flush_eptp(to_vmx(vcpu)->ept_pointer,
+                                                           range);
                }
-       } else {
-               ret = __hv_remote_flush_tlb_with_range(kvm,
-                               kvm_get_vcpu(kvm, 0), range);
+       } else if (VALID_PAGE(to_kvm_vmx(kvm)->hv_tlb_eptp)) {
+               ret = hv_remote_flush_eptp(to_kvm_vmx(kvm)->hv_tlb_eptp, range);
        }
 
        spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 4795955..f846cf3 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -351,6 +351,7 @@ struct kvm_vmx {
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;
 
+       hpa_t hv_tlb_eptp;
        enum ept_pointers_status ept_pointers_match;
        spinlock_t ept_pointer_lock;
 };