arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
author     Marc Zyngier <marc.zyngier@arm.com>
           Fri, 20 Jul 2018 09:56:33 +0000 (10:56 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 22 Jul 2018 12:27:42 +0000 (14:27 +0200)
commit b4f18c063a13dfb33e3a63fe1844823e19c2265e upstream.

In order to forward the guest's ARCH_WORKAROUND_2 calls to EL3,
add a small(-ish) sequence to handle it at EL2. Special care must
be taken to track the state of the guest itself by updating the
workaround flags. We also rely on patching to enable calls into
the firmware.

Note that since we need to execute branches, this always executes
after the Spectre-v2 mitigation has been applied.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
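
For context: the hypercall this patch traps is the SMCCC 1.1 ARCH_WORKAROUND_2 call that a guest issues to turn its SSBD (Spectre-v4) mitigation on or off. A minimal sketch of the guest side, assuming the generic arm_smccc_1_1_hvc() helper from include/linux/arm-smccc.h (illustrative only, not part of this patch):

#include <linux/arm-smccc.h>

/*
 * Ask the hypervisor to enable (1) or disable (0) the SSBD workaround
 * for this vCPU. The EL2 sequence added below traps this HVC, records
 * the requested state in vcpu->arch.workaround_flags and, when the
 * current CPU needs it, forwards the request to EL3 via SMC.
 */
static void ssbd_guest_request(unsigned long enable)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, enable, &res);
}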
arch/arm64/kernel/asm-offsets.c
arch/arm64/kvm/hyp/hyp-entry.S

diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 6e6375e..bd239b1 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -127,6 +127,7 @@ int main(void)
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,         offsetof(struct kvm_vcpu, arch.ctxt));
+  DEFINE(VCPU_WORKAROUND_FLAGS,        offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(CPU_GP_REGS,          offsetof(struct kvm_cpu_context, gp_regs));
   DEFINE(CPU_USER_PT_REGS,     offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,          offsetof(struct kvm_regs, fp_regs));
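
The new VCPU_WORKAROUND_FLAGS entry is what lets the hyp assembly below reach the per-vCPU mitigation state: each DEFINE() in asm-offsets.c turns an offsetof() expression into an assembler-visible constant. A standalone sketch of the mechanism, with a simplified stand-in for the real struct kvm_vcpu (not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the real struct kvm_vcpu / kvm_vcpu_arch. */
struct vcpu_arch {
	unsigned long ctxt[64];		/* placeholder for the CPU context  */
	unsigned long workaround_flags;	/* ARCH_WORKAROUND_2 state per vCPU */
};

struct vcpu {
	int vcpu_id;
	struct vcpu_arch arch;
};

int main(void)
{
	/*
	 * The kernel's DEFINE() macro emits this value as an assembler
	 * symbol (here it would be VCPU_WORKAROUND_FLAGS); this sketch
	 * simply prints it.
	 */
	printf("VCPU_WORKAROUND_FLAGS = %zu\n",
	       offsetof(struct vcpu, arch.workaround_flags));
	return 0;
}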
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 3418c1d..bf4988f 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -107,8 +107,44 @@ el1_hvc_guest:
         */
        ldr     x1, [sp]                                // Guest's x0
        eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+       cbz     w1, wa_epilogue
+
+       /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
+       eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
+                         ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap
-       mov     x0, x1
+
+#ifdef CONFIG_ARM64_SSBD
+alternative_cb arm64_enable_wa2_handling
+       b       wa2_end
+alternative_cb_end
+       get_vcpu_ptr    x2, x0
+       ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]
+
+       // Sanitize the argument and update the guest flags
+       ldr     x1, [sp, #8]                    // Guest's x1
+       clz     w1, w1                          // Murphy's device:
+       lsr     w1, w1, #5                      // w1 = !!w1 without using
+       eor     w1, w1, #1                      // the flags...
+       bfi     x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
+       str     x0, [x2, #VCPU_WORKAROUND_FLAGS]
+
+       /* Check that we actually need to perform the call */
+       hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
+       cbz     x0, wa2_end
+
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
+       smc     #0
+
+       /* Don't leak data from the SMC call */
+       mov     x3, xzr
+wa2_end:
+       mov     x2, xzr
+       mov     x1, xzr
+#endif
+
+wa_epilogue:
+       mov     x0, xzr
        add     sp, sp, #16
        eret
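
A note on the clz/lsr/eor sequence above (the "Murphy's device"): it normalises the guest's x1 to exactly 0 or 1 using only data-processing instructions, and bfi then stores that bit at VCPU_WORKAROUND_2_FLAG_SHIFT in the vCPU's workaround flags. In C terms, for a 32-bit value (a standalone illustration of the arithmetic, not kernel code):

#include <stdio.h>

/*
 * CLZ of a 32-bit value is 32 only when the value is zero, so
 * (clz >> 5) is 1 for zero and 0 for anything else; XOR with 1
 * turns that into !!x. __builtin_clz(0) is undefined in C, so
 * zero is handled explicitly here; the AArch64 CLZ instruction
 * itself is defined to return 32 for a zero input.
 */
static unsigned int normalise(unsigned int x)
{
	unsigned int clz = x ? (unsigned int)__builtin_clz(x) : 32;

	return (clz >> 5) ^ 1;		/* == !!x */
}

int main(void)
{
	printf("%u %u %u\n", normalise(0), normalise(1), normalise(0x80000000u));
	/* prints: 0 1 1 */
	return 0;
}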