        return kvm_read_cr3(vcpu);
}

-static void inject_page_fault(struct kvm_vcpu *vcpu,
-                             struct x86_exception *fault)
-{
-       vcpu->arch.mmu->inject_page_fault(vcpu, fault);
-}
-
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                           unsigned int access, int *nr_present)
{
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                if (!prefault)
-                       inject_page_fault(vcpu, &walker.fault);
+                       kvm_inject_emulated_page_fault(vcpu, &walker.fault);
                return RET_PF_RETRY;
        }
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
                                    struct x86_exception *fault)
{
+       struct kvm_mmu *fault_mmu;
        WARN_ON_ONCE(fault->vector != PF_VECTOR);

-       if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
-               vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
-       else
-               vcpu->arch.mmu->inject_page_fault(vcpu, fault);
+       fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
+                                              vcpu->arch.walk_mmu;
+       fault_mmu->inject_page_fault(vcpu, fault);

        return fault->nested_page_fault;
}