
arch/x86/kvm/vmx.c: use PAGE_ALIGNED instead of IS_ALIGNED(x, PAGE_SIZE)
author    Fabian Frederick <fabf@skynet.be>
          Sat, 14 Jun 2014 21:44:29 +0000 (23:44 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 19 Jun 2014 10:52:08 +0000 (12:52 +0200)
Use the PAGE_ALIGNED() helper from mm.h instead of open-coding IS_ALIGNED(x, PAGE_SIZE) checks.

Cc: Gleb Natapov <gleb@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fabian Frederick <fabf@skynet.be>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
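
For context: PAGE_ALIGNED() comes from include/linux/mm.h and reduces to the same IS_ALIGNED() test against PAGE_SIZE, so the conversion does not change behaviour. Below is a minimal stand-alone sketch of the equivalence; the macro bodies are paraphrased from the kernel headers of this era, and the 4 KiB PAGE_SIZE is an assumption made only for this demo, not something taken from the patch.

#include <stdio.h>

/* Paraphrased from include/linux/kernel.h: true when x is a multiple of a
 * (a must be a power of two). */
#define IS_ALIGNED(x, a)        (((x) & ((typeof(x))(a) - 1)) == 0)

/* Assumed 4 KiB pages for this stand-alone demo. */
#define PAGE_SIZE               4096UL

/* Paraphrased from include/linux/mm.h: page-size variant of the same test. */
#define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

int main(void)
{
        unsigned long long vmptr[] = { 0x0, 0x1000, 0x1fff, 0xabcd000 };

        /* Both spellings mask the same low-order bits, so the results match. */
        for (unsigned i = 0; i < sizeof(vmptr) / sizeof(vmptr[0]); i++)
                printf("%#llx: IS_ALIGNED=%d PAGE_ALIGNED=%d\n",
                       vmptr[i],
                       (int)IS_ALIGNED(vmptr[i], PAGE_SIZE),
                       (int)PAGE_ALIGNED(vmptr[i]));
        return 0;
}

The kernel macro casts its argument to unsigned long first, which is why it also accepts pointers, whereas the open-coded IS_ALIGNED(vmptr, PAGE_SIZE) call relies on vmptr already being an integer type.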
arch/x86/kvm/vmx.c

index 801332e..4f84be0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5918,7 +5918,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                 * which replaces physical address width with 32
                 *
                 */
-               if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failInvalid(vcpu);
                        skip_emulated_instruction(vcpu);
                        return 1;
@@ -5936,7 +5936,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                vmx->nested.vmxon_ptr = vmptr;
                break;
        case EXIT_REASON_VMCLEAR:
-               if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_INVALID_ADDRESS);
                        skip_emulated_instruction(vcpu);
@@ -5951,7 +5951,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                }
                break;
        case EXIT_REASON_VMPTRLD:
-               if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMPTRLD_INVALID_ADDRESS);
                        skip_emulated_instruction(vcpu);
@@ -8113,14 +8113,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        }
 
        if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
-                       !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
+                       !PAGE_ALIGNED(vmcs12->msr_bitmap)) {
                /*TODO: Also verify bits beyond physical address width are 0*/
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;
        }
 
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-                       !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
+                       !PAGE_ALIGNED(vmcs12->apic_access_addr)) {
                /*TODO: Also verify bits beyond physical address width are 0*/
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;