DECLARE_KVM_VHE_SYM(sym); \
DECLARE_KVM_NVHE_SYM(sym)
-#define CHOOSE_VHE_SYM(sym) sym
-#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym)
-
+ #define DECLARE_KVM_VHE_PER_CPU(type, sym) \
+ DECLARE_PER_CPU(type, sym)
+ #define DECLARE_KVM_NVHE_PER_CPU(type, sym) \
+ DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+
+ #define DECLARE_KVM_HYP_PER_CPU(type, sym) \
+ DECLARE_KVM_VHE_PER_CPU(type, sym); \
+ DECLARE_KVM_NVHE_PER_CPU(type, sym)
+
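For illustration (not part of the patch), and assuming kvm_nvhe_sym() prefixes a name with __kvm_nvhe_ as defined elsewhere in this header, a declaration such as

        DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

expands to roughly

        DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);            /* kernel/VHE copy */
        DECLARE_PER_CPU(struct kvm_host_data, __kvm_nvhe_kvm_host_data); /* nVHE hyp copy */

so every hyp per-CPU variable gets two independent instances: one in the kernel's percpu section and one in the nVHE image's own percpu section.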
-#ifndef __KVM_NVHE_HYPERVISOR__
+ /*
+ * Compute pointer to a symbol defined in nVHE percpu region.
+ * Returns NULL if percpu memory has not been allocated yet.
+ */
+ #define this_cpu_ptr_nvhe_sym(sym) per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
+ #define per_cpu_ptr_nvhe_sym(sym, cpu) \
+ ({ \
+ unsigned long base, off; \
+ base = kvm_arm_hyp_percpu_base[cpu]; \
+ off = (unsigned long)&CHOOSE_NVHE_SYM(sym) - \
+ (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start); \
+ base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \
+ })
+
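A minimal C model of the lookup above (hypothetical names, not kernel code): the symbol's offset inside the hyp percpu section is added to the per-CPU base recorded at allocation time, and NULL is returned while that base is still zero.

        /* Model only: hyp_percpu_base[] stands in for kvm_arm_hyp_percpu_base[]
         * and percpu_start for the nVHE __per_cpu_start section marker. */
        static unsigned long hyp_percpu_base[64 /* NR_CPUS */];

        static void *hyp_per_cpu_ptr(char *sym, char *percpu_start, int cpu)
        {
                unsigned long base = hyp_percpu_base[cpu];
                unsigned long off  = (unsigned long)sym - (unsigned long)percpu_start;

                return base ? (void *)(base + off) : NULL; /* NULL before allocation */
        }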
- #define CHOOSE_HYP_SYM(sym) CHOOSE_NVHE_SYM(sym)
+#if defined(__KVM_NVHE_HYPERVISOR__)
+
- #define CHOOSE_VHE_SYM(sym) __nvhe_undefined_symbol
+#define CHOOSE_NVHE_SYM(sym) sym
++#define CHOOSE_HYP_SYM(sym) CHOOSE_NVHE_SYM(sym)
++
+/* The nVHE hypervisor shouldn't even try to access VHE symbols */
+extern void *__nvhe_undefined_symbol;
- #elif defined(__KVM_VHE_HYPERVISOR)
++#define CHOOSE_VHE_SYM(sym) __nvhe_undefined_symbol
++#define this_cpu_ptr_hyp_sym(sym) (&__nvhe_undefined_symbol)
++#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol)
+
- #define CHOOSE_HYP_SYM(sym) CHOOSE_VHE_SYM(sym)
++#elif defined(__KVM_VHE_HYPERVISOR__)
+
- #define CHOOSE_NVHE_SYM(sym) __vhe_undefined_symbol
+#define CHOOSE_VHE_SYM(sym) sym
++#define CHOOSE_HYP_SYM(sym) CHOOSE_VHE_SYM(sym)
++
+/* The VHE hypervisor shouldn't even try to access nVHE symbols */
+extern void *__vhe_undefined_symbol;
++#define CHOOSE_NVHE_SYM(sym) __vhe_undefined_symbol
++#define this_cpu_ptr_hyp_sym(sym) (&__vhe_undefined_symbol)
++#define per_cpu_ptr_hyp_sym(sym, cpu) (&__vhe_undefined_symbol)
+
+#else
+
/*
* BIG FAT WARNINGS:
*
* - Don't let the nVHE hypervisor have access to this, as it will
* pick the *wrong* symbol (yes, it runs at EL2...).
*/
- #define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+ #define CHOOSE_HYP_SYM(sym) (is_kernel_in_hyp_mode() \
+ ? CHOOSE_VHE_SYM(sym) \
: CHOOSE_NVHE_SYM(sym))
-#else
-/* The nVHE hypervisor shouldn't even try to access anything */
-extern void *__nvhe_undefined_symbol;
-#define CHOOSE_HYP_SYM(sym) __nvhe_undefined_symbol
-#define this_cpu_ptr_hyp_sym(sym) (&__nvhe_undefined_symbol)
-#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol)
++
+ #define this_cpu_ptr_hyp_sym(sym) (is_kernel_in_hyp_mode() \
+ ? this_cpu_ptr(&sym) \
+ : this_cpu_ptr_nvhe_sym(sym))
++
+ #define per_cpu_ptr_hyp_sym(sym, cpu) (is_kernel_in_hyp_mode() \
+ ? per_cpu_ptr(&sym, cpu) \
+ : per_cpu_ptr_nvhe_sym(sym, cpu))
++
+#define CHOOSE_VHE_SYM(sym) sym
+#define CHOOSE_NVHE_SYM(sym) kvm_nvhe_sym(sym)
+
#endif
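Putting the pieces together, generic kernel code can now touch the hyp copy of a per-CPU variable without knowing whether it runs under VHE. A minimal usage sketch (the helper name is made up; it mirrors the cpu_hyp_reinit() change later in this series):

        DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

        static void set_hyp_vector_this_cpu(unsigned long vector)
        {
                /* VHE: resolves to this_cpu_ptr(&kvm_hyp_vector).
                 * nVHE: resolves to the per-CPU copy rebased onto
                 * kvm_arm_hyp_percpu_base[], which must already be allocated. */
                *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = vector;
        }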
/* Translate a kernel address @ptr into its equivalent linear mapping */
struct kvm_s2_mmu;
DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_host_vector CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
- #ifdef CONFIG_KVM_INDIRECT_VECTORS
+ extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+ DECLARE_KVM_NVHE_SYM(__per_cpu_start);
+ DECLARE_KVM_NVHE_SYM(__per_cpu_end);
+
extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
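The per-CPU bases themselves are filled in at init time elsewhere in the series; a sketch of the idea only (hypothetical helper, not the patch's exact code, error unwinding omitted):

        static int alloc_hyp_percpu_regions(void)
        {
                size_t size = (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
                              (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
                int cpu;

                for_each_possible_cpu(cpu) {
                        void *base = alloc_pages_exact(size, GFP_KERNEL);

                        if (!base)
                                return -ENOMEM;
                        /* Seed the region with the section's initial values. */
                        memcpy(base, CHOOSE_NVHE_SYM(__per_cpu_start), size);
                        kvm_arm_hyp_percpu_base[cpu] = (unsigned long)base;
                }
                return 0;
        }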
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
- hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+.macro get_loaded_vcpu vcpu, ctxt
- hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
++ adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
+ ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
+.macro set_loaded_vcpu vcpu, ctxt, tmp
++ adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
+ str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
+.endm
+
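In C terms the two macros behave roughly as below (a sketch only; it assumes HOST_CONTEXT_VCPU is the asm-offsets constant for kvm_cpu_context::__hyp_running_vcpu):

        /* Rough C equivalents of the asm macros above (illustrative only). */
        static inline struct kvm_vcpu *get_loaded_vcpu(void)
        {
                return __this_cpu_read(kvm_hyp_ctxt.__hyp_running_vcpu);
        }

        static inline void set_loaded_vcpu(struct kvm_vcpu *vcpu)
        {
                __this_cpu_write(kvm_hyp_ctxt.__hyp_running_vcpu, vcpu);
        }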
/*
* KVM extable for unexpected exceptions.
* In the same format _asm_extable, but output to a different section so that
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
- DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);
-DECLARE_KVM_HYP_PER_CPU(kvm_host_data_t, kvm_host_data);
++DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
return vect;
}
- /* This is only called on a !VHE system */
- static inline int kvm_map_vectors(void)
- {
- /*
- * HBP = ARM64_HARDEN_BRANCH_PREDICTOR
- * HEL2 = ARM64_HARDEN_EL2_VECTORS
- *
- * !HBP + !HEL2 -> use direct vectors
- * HBP + !HEL2 -> use hardened vectors in place
- * !HBP + HEL2 -> allocate one vector slot and use exec mapping
- * HBP + HEL2 -> use hardened vertors and use exec mapping
- */
- if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
- __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
- __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
- }
-
- if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
- phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
- unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-
- /*
- * Always allocate a spare vector slot, as we don't
- * know yet which CPUs have a BP hardening slot that
- * we can reuse.
- */
- __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
- BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
- return create_hyp_exec_mappings(vect_pa, size,
- &__kvm_bp_vect_base);
- }
-
- return 0;
- }
- #else
- static inline void *kvm_get_hyp_vector(void)
- {
- return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
- }
-
- static inline int kvm_map_vectors(void)
- {
- return 0;
- }
- #endif
-
- #ifdef CONFIG_ARM64_SSBD
- DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
- static inline int hyp_map_aux_data(void)
- {
- int cpu, err;
-
- for_each_possible_cpu(cpu) {
- u64 *ptr;
-
- ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
- err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
- if (err)
- return err;
- }
- return 0;
- }
- #else
- static inline int hyp_map_aux_data(void)
- {
- return 0;
- }
- #endif
-
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
-/*
- * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
- * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
- * 52bit IPS.
- */
-static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
-{
- int x = ARM64_VTTBR_X(ipa_shift, levels);
-
- return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
-}
-
-static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
-{
- unsigned int x = arm64_vttbr_x(ipa_shift, levels);
-
- return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
-}
-
-static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
-{
- return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
-}
-
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
struct kvm_vmid *vmid = &mmu->vmid;
__asm__(".arch_extension virt");
#endif
- DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
- DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
- DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
++DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
++
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+ unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
cpu_hyp_reset();
- __this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());
++ *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
+
if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
else
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
- obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
- obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
-obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
++obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
*/
SYM_FUNC_START(__guest_enter)
// x0: vcpu
- // x1: host context
- // x2-x17: clobbered by macros
+ // x1-x17: clobbered by macros
// x29: guest context
- hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
- // Store the host regs
++ adr_this_cpu x1, kvm_hyp_ctxt, x2
+
+ // Store the hyp regs
save_callee_saved_regs x1
- // Save the host's sp_el0
+ // Save hyp's sp_el0
save_sp_el0 x1, x2
- // Now the host state is stored if we have a pending RAS SError it must
- // affect the host. If any asynchronous exception is pending we defer
- // the guest entry. The DSB isn't necessary before v8.2 as any SError
- // would be fatal.
 + // Now the hyp state is stored; if we have a pending RAS SError it must
+ // affect the host or hyp. If any asynchronous exception is pending we
+ // defer the guest entry. The DSB isn't necessary before v8.2 as any
+ // SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
dsb nshst
isb
eret
sb
- hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
+SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
+ // x2-x29,lr: vcpu regs
+ // vcpu x0-x1 on the stack
+
+ // If the hyp context is loaded, go straight to hyp_panic
+ get_loaded_vcpu x0, x1
+ cbz x0, hyp_panic
+
+ // The hyp context is saved so make sure it is restored to allow
+ // hyp_panic to run at hyp and, subsequently, panic to run in the host.
+ // This makes use of __guest_exit to avoid duplication but sets the
+ // return address to tail call into hyp_panic. As a side effect, the
+ // current state is saved to the guest context but it will only be
+ // accurate if the guest had been completely restored.
++ adr_this_cpu x0, kvm_hyp_ctxt, x1
+ adr x1, hyp_panic
+ str x1, [x0, #CPU_XREG_OFFSET(30)]
+
+ get_vcpu_ptr x1, x0
+
SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// x0: return code
// x1: vcpu
// Store the guest's sp_el0
save_sp_el0 x1, x2
- hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
- get_host_ctxt x2, x3
++ adr_this_cpu x2, kvm_hyp_ctxt, x3
- // Macro ptrauth_switch_to_guest format:
- // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
+ // Macro ptrauth_switch_to_hyp format:
+ // ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
// The below macro to save/restore keys is not implemented in C code
// as it may cause Pointer Authentication key signing mismatch errors
// when this feature is enabled for kernel code.
!esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
return false;
- ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
- ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
++ ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
__ptrauth_save_key(ctxt, APIA);
__ptrauth_save_key(ctxt, APIB);
__ptrauth_save_key(ctxt, APDA);
return false;
}
- static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
- {
- if (!cpus_have_final_cap(ARM64_SSBD))
- return false;
-
- return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
- }
-
- static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
- {
- #ifdef CONFIG_ARM64_SSBD
- /*
- * The host runs with the workaround always present. If the
- * guest wants it disabled, so be it...
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
- #endif
- }
-
- static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
- {
- #ifdef CONFIG_ARM64_SSBD
- /*
- * If the guest has disabled the workaround, bring it back on.
- */
- if (__needs_ssbd_off(vcpu) &&
- __hyp_this_cpu_read(arm64_ssbd_callback_required))
- arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
- #endif
- }
-
static inline void __kvm_unexpected_el2_exception(void)
{
+ extern char __guest_exit_panic[];
unsigned long addr, fixup;
- struct kvm_cpu_context *host_ctxt;
struct exception_table_entry *entry, *end;
unsigned long elr_el2 = read_sysreg(elr_el2);
#include <asm/processor.h>
#include <asm/thread_info.h>
-/* Non-VHE copy of the kernel symbol. */
-DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-/* Non-VHE instance of kvm_host_data. */
-DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
++/* Non-VHE specific context */
++DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
++DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
++DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
}
write_sysreg(val, cptr_el2);
- write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
++ write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
pmr_sync();
}
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
- vcpu = kern_hyp_va(vcpu);
-
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
- exit_code = __guest_enter(vcpu, host_ctxt);
+ exit_code = __guest_enter(vcpu);
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg(par_el1);
- struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
- unsigned long str_va;
+ bool restore_host = true;
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_vcpu *vcpu;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
- if (read_sysreg(vttbr_el2)) {
++ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+ vcpu = host_ctxt->__hyp_running_vcpu;
+
+ if (vcpu) {
__timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
- __deactivate_vm(vcpu);
+ __load_host_stage2();
__sysreg_restore_state_nvhe(host_ctxt);
}
const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
-/* VHE instance of kvm_host_data. */
-DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
++/* VHE specific context */
++DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
++DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
++DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
- __set_guest_arch_workaround_state(vcpu);
-
do {
/* Jump in the fire! */
- exit_code = __guest_enter(vcpu, host_ctxt);
+ exit_code = __guest_enter(vcpu);
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
return ret;
}
-static void __hyp_call_panic(u64 spsr, u64 elr, u64 par,
- struct kvm_cpu_context *host_ctxt)
+static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
{
+ struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
- host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
++ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
vcpu = host_ctxt->__hyp_running_vcpu;
__deactivate_traps(vcpu);