1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
3 * Copyright (C) 2015-2018 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
*/
7 #include <linux/arm-smccc.h>
8 #include <linux/linkage.h>
10 #include <asm/alternative.h>
11 #include <asm/assembler.h>
12 #include <asm/cpufeature.h>
13 #include <asm/kvm_arm.h>
14 #include <asm/kvm_asm.h>
15 #include <asm/kvm_mmu.h>
18 .macro save_caller_saved_regs_vect
19 /* x0 and x1 were saved in the vector entry */
/*
 * Push the remaining AAPCS64 caller-saved GPRs (x2-x17) onto the hyp
 * stack, two at a time, keeping sp 16-byte aligned at every store.
 * NOTE(review): the matching .endm is not visible in this extract.
 */
20 stp x2, x3, [sp, #-16]!
21 stp x4, x5, [sp, #-16]!
22 stp x6, x7, [sp, #-16]!
23 stp x8, x9, [sp, #-16]!
24 stp x10, x11, [sp, #-16]!
25 stp x12, x13, [sp, #-16]!
26 stp x14, x15, [sp, #-16]!
27 stp x16, x17, [sp, #-16]!
30 .macro restore_caller_saved_regs_vect
/*
 * Pop the caller-saved GPRs in the exact reverse order of
 * save_caller_saved_regs_vect. Only the x16..x10 pops are visible
 * here; the remaining pops and the .endm fall outside this extract.
 */
31 ldp x16, x17, [sp], #16
32 ldp x14, x15, [sp], #16
33 ldp x12, x13, [sp], #16
34 ldp x10, x11, [sp], #16
/*
46 * Shuffle the parameters before calling the function
47 * pointed to in x0. Assumes parameters in x[1,2,3].
*/
58 el1_sync: // Guest trapped into EL2
/*
 * Classify the trap. Assumes x0 holds ESR_EL2 here — the mrs that
 * loads it is not visible in this extract. After the shift, x0 is
 * the exception class (EC) field.
 */
61 lsr x0, x0, #ESR_ELx_EC_SHIFT
/*
 * Match either flavour of HVC: if the EC is not HVC64, ccmp tests
 * HVC32; if it *was* HVC64, ccmp forces NZCV=#4 (Z set) so the
 * following conditional (outside this extract) sees "equal" too.
 */
62 cmp x0, #ESR_ELx_EC_HVC64
63 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
66 #ifdef __KVM_NVHE_HYPERVISOR__
/*
 * nVHE only: a non-zero VTTBR_EL2 means a guest stage-2 context is
 * installed, i.e. the HVC came from the guest, not the host.
 */
67 mrs x1, vttbr_el2 // If vttbr is valid, the guest
68 cbnz x1, el1_hvc_guest // called HVC
70 /* Here, we're pretty sure the host called HVC. */
73 /* Check for a stub HVC call */
74 cmp x0, #HVC_STUB_HCALL_NR
/*
78 * Compute the idmap address of __kvm_handle_stub_hvc and
79 * jump there. Since we use kimage_voffset, do not use the
80 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
81 * (by loading it from the constant pool).
 *
83 * Preserve x0-x4, which may contain stub parameters.
 */
85 ldr x5, =__kvm_handle_stub_hvc
86 ldr_l x6, kimage_voffset
/*
 * The subtraction of kimage_voffset and the branch through x5, plus
94 * Perform the EL2 call
 * sequence, are not visible in this extract.
 */
101 #endif /* __KVM_NVHE_HYPERVISOR__ */
/*
105 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
106 * The workaround has already been applied on the host,
107 * so let's quickly get back to the guest. We don't bother
108 * restoring x1, as it can be clobbered anyway.
 */
/* The guest's x0 is still on the hyp stack from the vector preamble. */
110 ldr x1, [sp] // Guest's x0
/* w1 becomes 0 iff the guest asked for WORKAROUND_1. */
111 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
114 /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
/*
 * Undo the previous eor and test against WORKAROUND_2 in one step:
 * x ^ WA1 ^ (WA1 ^ WA2) == x ^ WA2.
 */
115 eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
116 ARM_SMCCC_ARCH_WORKAROUND_2)
119 #ifdef CONFIG_ARM64_SSBD
/* Runtime-patched: skipped entirely when WA2 handling is disabled. */
120 alternative_cb arm64_enable_wa2_handling
/*
 * NOTE(review): x2 presumably holds the vcpu pointer here — the load
 * that establishes it is not visible in this extract.
 */
124 ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
126 // Sanitize the argument and update the guest flags
127 ldr x1, [sp, #8] // Guest's x1
/* clz+lsr+eor collapses any non-zero w1 to 1 and zero to 0. */
128 clz w1, w1 // Murphy's device:
129 lsr w1, w1, #5 // w1 = !!w1 without using
130 eor w1, w1, #1 // the flags...
/* Record the guest's requested WA2 state in its per-vcpu flags. */
131 bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
132 str x0, [x2, #VCPU_WORKAROUND_FLAGS]
134 /* Check that we actually need to perform the call */
135 hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
/* Forward the request to EL3 firmware (smc not visible here). */
138 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
141 /* Don't leak data from the SMC call */
/*
 * Exception-return codes handed back to the world-switch loop in x0.
 * NOTE(review): the owning labels (presumably el1_trap / el1_irq /
 * el1_error) and the intervening branches are not visible in this
 * extract.
 */
156 mov x0, #ARM_EXCEPTION_TRAP
161 mov x0, #ARM_EXCEPTION_IRQ
166 mov x0, #ARM_EXCEPTION_EL1_SERROR
170 /* Check for illegal exception return */
/*
 * Unexpected synchronous exception taken at EL2 itself: preserve the
 * caller-saved state plus the frame record, report via the C helper,
 * then restore everything. The label and the checks between these
 * fragments are not visible in this extract.
 */
174 save_caller_saved_regs_vect
175 stp x29, x30, [sp, #-16]!
176 bl kvm_unexpected_el2_exception
177 ldp x29, x30, [sp], #16
178 restore_caller_saved_regs_vect
183 /* Let's attempt a recovery from the illegal exception return */
/* Signal an illegal exception return to the run loop. */
185 mov x0, #ARM_EXCEPTION_IL
/*
 * Unexpected EL2 error (SError) path: same save/report/restore dance
 * as the EL2 sync path above. The owning label and the eret are not
 * visible in this extract.
 */
190 save_caller_saved_regs_vect
191 stp x29, x30, [sp, #-16]!
193 bl kvm_unexpected_el2_exception
195 ldp x29, x30, [sp], #16
196 restore_caller_saved_regs_vect
201 #ifdef __KVM_NVHE_HYPERVISOR__
202 SYM_FUNC_START(__hyp_do_panic)
/*
 * Build a PSTATE value with all DAIF bits masked for the exception
 * return into the panic path. The continuation of this mov and the
 * rest of the function body (through the eret) are not visible in
 * this extract.
 */
203 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
210 SYM_FUNC_END(__hyp_do_panic)
213 SYM_CODE_START(__hyp_panic)
/* Body not visible in this extract. */
216 SYM_CODE_END(__hyp_panic)
218 .macro invalid_vector label, target = __hyp_panic
/*
 * Emit a stub vector handler named \label that diverts to \target
 * (default: __hyp_panic). The branch and .endm are not visible in
 * this extract.
 */
220 SYM_CODE_START(\label)
225 /* None of these should ever happen */
/* Instantiate a panic stub for each architecturally impossible vector. */
226 invalid_vector el2t_sync_invalid
227 invalid_vector el2t_irq_invalid
228 invalid_vector el2t_fiq_invalid
229 invalid_vector el2t_error_invalid
230 invalid_vector el2h_sync_invalid
231 invalid_vector el2h_irq_invalid
232 invalid_vector el2h_fiq_invalid
233 invalid_vector el1_fiq_invalid
239 .macro check_preamble_length start, end
240 /* kvm_patch_vector_branch() generates code that jumps over the preamble. */
/*
 * Build-time assertion: the code emitted between \start and \end must
 * be exactly KVM_VECTOR_PREAMBLE bytes, or patching would land in the
 * wrong place. (.endif/.endm are not visible in this extract.)
 */
241 .if ((\end-\start) != KVM_VECTOR_PREAMBLE)
242 .error "KVM vector preamble length mismatch"
246 .macro valid_vect target
/*
 * Vector entry preamble for a handled exception: stash x0/x1 so the
 * handler has scratch registers. The 661/662 markers referenced below,
 * the branch to \target and the .endm are not visible in this extract.
 */
250 stp x0, x1, [sp, #-16]!
254 check_preamble_length 661b, 662b
257 .macro invalid_vect target
/*
 * Vector entry for an unexpected exception: undo the preamble's x0/x1
 * save before diverting to \target. The 661/662 markers, the branch
 * and the .endm are not visible in this extract.
 */
263 ldp x0, x1, [sp], #16
266 check_preamble_length 661b, 662b
/*
 * EL2 exception vector table: four groups of four entries
 * (sync/IRQ/FIQ/error), for EL2t, EL2h, 64-bit lower EL and 32-bit
 * lower EL respectively. NOTE(review): the alignment directive
 * expected before SYM_CODE_START is not visible in this extract.
 */
269 SYM_CODE_START(__kvm_hyp_vector)
270 invalid_vect el2t_sync_invalid // Synchronous EL2t
271 invalid_vect el2t_irq_invalid // IRQ EL2t
272 invalid_vect el2t_fiq_invalid // FIQ EL2t
273 invalid_vect el2t_error_invalid // Error EL2t
275 valid_vect el2_sync // Synchronous EL2h
276 invalid_vect el2h_irq_invalid // IRQ EL2h
277 invalid_vect el2h_fiq_invalid // FIQ EL2h
278 valid_vect el2_error // Error EL2h
280 valid_vect el1_sync // Synchronous 64-bit EL1
281 valid_vect el1_irq // IRQ 64-bit EL1
282 invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
283 valid_vect el1_error // Error 64-bit EL1
285 valid_vect el1_sync // Synchronous 32-bit EL1
286 valid_vect el1_irq // IRQ 32-bit EL1
287 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
288 valid_vect el1_error // Error 32-bit EL1
289 SYM_CODE_END(__kvm_hyp_vector)
291 #ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
299 * The default sequence is to directly branch to the KVM vectors,
300 * using the computed offset. This applies for VHE as well as
301 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
303 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with a patched sequence of the form:
 *
306 * stp x0, x1, [sp, #-16]!
307 * movz x0, #(addr & 0xffff)
308 * movk x0, #((addr >> 16) & 0xffff), lsl #16
309 * movk x0, #((addr >> 32) & 0xffff), lsl #32
 *
 * where
313 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
314 * See kvm_patch_vector_branch for details.
 */
/*
 * Default (unpatched) slot body, replaced at runtime by
 * kvm_patch_vector_branch. NOTE(review): the enclosing macro
 * definition and the 0:/1: labels the branch expression references
 * are not visible in this extract.
 */
316 alternative_cb kvm_patch_vector_branch
317 stp x0, x1, [sp, #-16]!
318 b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
325 .macro generate_vectors
/*
 * Emit one full 2KB vector-table slot. The body (and .endm) are not
 * visible in this extract; the .org below caps the slot at SZ_2K so
 * an oversized expansion fails at build time.
 */
330 .org 0b + SZ_2K // Safety measure
/*
 * Branch-predictor-hardening vector slots: one generated vector table
 * per slot, BP_HARDEN_EL2_SLOTS in total. The .org asserts the whole
 * region matches the size the C side expects (__BP_HARDEN_HYP_VECS_SZ).
 */
334 SYM_CODE_START(__bp_harden_hyp_vecs)
335 .rept BP_HARDEN_EL2_SLOTS
338 1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
340 SYM_CODE_END(__bp_harden_hyp_vecs)