
KVM: x86: VMX: Make smaller physical guest address space support user-configurable
author    Mohammed Gamal <mgamal@redhat.com>
          Thu, 3 Sep 2020 14:11:22 +0000 (16:11 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 23 Sep 2020 13:47:24 +0000 (09:47 -0400)
This patch exposes allow_smaller_maxphyaddr to the user as a module parameter.
Since smaller physical address spaces are only supported on VMX, the
parameter is only exposed in the kvm_intel module.

For now, disable support by default and let the user decide whether they
want to enable it.

Modifications to VMX page fault and EPT violation handling will depend
on whether that parameter is enabled.

Signed-off-by: Mohammed Gamal <mgamal@redhat.com>
Message-Id: <20200903141122.72908-1-mgamal@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
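
The parameter can be set when kvm_intel is loaded and, since it is registered
with S_IRUGO, read back at runtime from sysfs. Below is a minimal user-space
sketch (an editorial example, not part of the patch) that checks the current
value through the standard /sys/module/<module>/parameters/<name> path:

#include <stdio.h>

int main(void)
{
        /* Standard sysfs location for a readable module parameter. */
        const char *path =
                "/sys/module/kvm_intel/parameters/allow_smaller_maxphyaddr";
        FILE *f = fopen(path, "r");
        int c;

        if (!f) {
                perror(path);   /* module not loaded or parameter absent */
                return 1;
        }
        c = fgetc(f);           /* bool parameters read back as 'Y' or 'N' */
        fclose(f);
        printf("allow_smaller_maxphyaddr: %c\n", c);
        return 0;
}

Enabling the parameter at load time would look like
"modprobe kvm_intel allow_smaller_maxphyaddr=1".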

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 75cd720..f0384e9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1;
 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #endif
 
+extern bool __read_mostly allow_smaller_maxphyaddr;
+module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);
+
 #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
 #define KVM_VM_CR0_ALWAYS_ON                           \
@@ -4803,6 +4806,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
                         * EPT will cause page fault only if we need to
                         * detect illegal GPAs.
                         */
+                       WARN_ON_ONCE(!allow_smaller_maxphyaddr);
                        kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code);
                        return 1;
                } else
@@ -5331,7 +5335,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
         * would also use advanced VM-exit information for EPT violations to
         * reconstruct the page fault error code.
         */
-       if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa)))
+       if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
                return kvm_emulate_instruction(vcpu, 0);
 
        return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -8305,11 +8309,12 @@ static int __init vmx_init(void)
        vmx_check_vmcs12_offsets();
 
        /*
-        * Intel processors don't have problems with
-        * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable
-        * it for VMX by default
+        * Shadow paging doesn't have a (further) performance penalty
+        * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
+        * by default
         */
-       allow_smaller_maxphyaddr = true;
+       if (!enable_ept)
+               allow_smaller_maxphyaddr = true;
 
        return 0;
 }
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a2f8212..a0e4772 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -552,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 
 static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
 {
-       return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+       if (!enable_ept)
+               return true;
+
+       return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
 }
 
 void dump_vmcs(void);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e3de0fe..6736260 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs;
 u64 __read_mostly host_efer;
 EXPORT_SYMBOL_GPL(host_efer);
 
-bool __read_mostly allow_smaller_maxphyaddr;
+bool __read_mostly allow_smaller_maxphyaddr = 0;
 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
 
 static u64 __read_mostly host_xss;
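
For background on the comparison used in vmx_need_pf_intercept() and in the
vmx_init() comment above: the host's MAXPHYADDR, which is what
boot_cpu_data.x86_phys_bits reflects, is reported by CPUID leaf 0x80000008 in
EAX bits 7:0. A minimal user-space sketch (an editorial example, not part of
the patch) that reads it:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x80000008: physical address width in EAX[7:0]. */
        if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
                fprintf(stderr, "CPUID leaf 0x80000008 not supported\n");
                return 1;
        }
        printf("host MAXPHYADDR: %u bits\n", eax & 0xff);
        return 0;
}

A guest configured with a MAXPHYADDR smaller than this host value is the case
allow_smaller_maxphyaddr covers: with EPT enabled, page faults then have to be
intercepted so that illegal GPAs can be detected, as the hunks above show.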