vcpu->arch.reserved_gpa_bits, execonly);
}
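+/*
+ * Reserved host physical address bits: the bits above the host's effective
+ * MAXPHYADDR (shadow_phys_bits), which must be zero in shadow/host PTEs.
+ */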
+static inline u64 reserved_hpa_bits(void)
+{
+ return rsvd_bits(shadow_phys_bits, 63);
+}
+
/*
* the page table on host is the shadow page table for the page
* table in guest or amd nested guest, its mmu features completely
* follow the features in guest.
*/
shadow_zero_check = &context->shadow_zero_check;
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- rsvd_bits(shadow_phys_bits, 63),
+ reserved_hpa_bits(),
context->shadow_root_level, uses_nx,
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
is_pse(vcpu), true);
if (boot_cpu_is_amd())
__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- rsvd_bits(shadow_phys_bits, 63),
+ reserved_hpa_bits(),
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
true, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
- rsvd_bits(shadow_phys_bits, 63),
- false);
+ reserved_hpa_bits(), false);
if (!shadow_me_mask)
return;
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
- rsvd_bits(shadow_phys_bits, 63), execonly);
+ reserved_hpa_bits(), execonly);
}
#define BYTE_MASK(access) \