u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
+bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
int kvm_check_pvm_sysreg_table(void);
-void inject_undef64(struct kvm_vcpu *vcpu);
#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
}
/**
- * Handler for protected VM restricted exceptions.
- *
- * Inject an undefined exception into the guest and return true to indicate that
- * the hypervisor has handled the exit, and control should go back to the guest.
- */
-static bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
-{
-	inject_undef64(vcpu);
- return true;
-}
-
-/**
* Handler for protected VM MSR, MRS or System instruction execution in AArch64.
*
* Returns true if the hypervisor has handled the exit, and control should go
* Inject an unknown/undefined exception to an AArch64 guest while most of its
* sysregs are live.
*/
-void inject_undef64(struct kvm_vcpu *vcpu)
+static void inject_undef64(struct kvm_vcpu *vcpu)
{
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
	return true;
}
+
+/**
+ * Handler for protected VM restricted exceptions.
+ *
+ * Inject an undefined exception into the guest and return true to indicate that
+ * the hypervisor has handled the exit, and control should go back to the guest.
+ */
+bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+	inject_undef64(vcpu);
+	return true;
+}
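
For context (not part of this diff): exporting kvm_handle_pvm_restricted() through the fixed_config.h header lets other nVHE hypervisor objects route trap classes that are forbidden for protected guests to it. Below is a minimal sketch of such a consumer; the exit_handler_fn typedef and the pvm_exit_handlers table name are assumptions used purely for illustration, while the ESR_ELx_EC_* constants are the standard ones from asm/esr.h.

/* Sketch only: an ESR-EC-indexed dispatch table in another nVHE object. */
typedef bool (*exit_handler_fn)(struct kvm_vcpu *vcpu, u64 *exit_code);

static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= NULL,
	/* System register traps keep their dedicated handler. */
	[ESR_ELx_EC_SYS64]	= kvm_handle_pvm_sysreg,
	/* Restricted features simply get an undefined exception injected. */
	[ESR_ELx_EC_SVE]	= kvm_handle_pvm_restricted,
};

The bool return value fits this kind of calling convention: returning true tells the caller that the exit has been handled and control should go back to the guest, which is exactly what kvm_handle_pvm_restricted() reports after inject_undef64().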