
arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
author    Mark Rutland <mark.rutland@arm.com>        Thu, 12 Apr 2018 11:11:32 +0000 (12:11 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>        Fri, 20 Apr 2018 06:21:05 +0000 (08:21 +0200)
From: Marc Zyngier <marc.zyngier@arm.com>

commit f72af90c3783d924337624659b43e2d36f1b36b4 upstream.

We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
So let's intercept it as early as we can by testing for the
function call number as soon as we've identified an HVC call
coming from the guest.

Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com> [v4.9 backport]
Tested-by: Greg Hackmann <ghackmann@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
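
For context, the call being fast-pathed is the SMCCC_ARCH_WORKAROUND_1 branch-predictor
hardening request that a guest kernel issues through its HVC conduit on Spectre-v2
mitigation paths. Below is a minimal guest-side sketch, assuming the SMCCC 1.1 helpers
from <linux/arm-smccc.h> found in later kernels (not part of this v4.9 backport); the
function name guest_invoke_bp_hardening is made up for illustration:

    #include <linux/arm-smccc.h>

    /*
     * Sketch of a guest-side caller: ask the hypervisor to apply the
     * branch-predictor hardening workaround. With this patch applied on
     * the host, the HVC is answered directly from el1_hvc_guest without
     * a full guest exit.
     */
    static void guest_invoke_bp_hardening(void)
    {
            struct arm_smccc_res res;

            /* Function ID goes in w0; a KVM guest's conduit is HVC. */
            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &res);

            /* res.a0 should be 0 (SMCCC success) once the call is handled. */
    }
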
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399..4e9d50c 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
@@ -79,10 +80,11 @@ alternative_endif
        lsr     x0, x1, #ESR_ELx_EC_SHIFT
 
        cmp     x0, #ESR_ELx_EC_HVC64
+       ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
        b.ne    el1_trap
 
-       mrs     x1, vttbr_el2           // If vttbr is valid, the 64bit guest
-       cbnz    x1, el1_trap            // called HVC
+       mrs     x1, vttbr_el2           // If vttbr is valid, the guest
+       cbnz    x1, el1_hvc_guest       // called HVC
 
        /* Here, we're pretty sure the host called HVC. */
        ldp     x0, x1, [sp], #16
@@ -101,6 +103,20 @@ alternative_endif
 
 2:     eret
 
+el1_hvc_guest:
+       /*
+        * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+        * The workaround has already been applied on the host,
+        * so let's quickly get back to the guest. We don't bother
+        * restoring x1, as it can be clobbered anyway.
+        */
+       ldr     x1, [sp]                                // Guest's x0
+       eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+       cbnz    w1, el1_trap
+       mov     x0, x1
+       add     sp, sp, #16
+       eret
+
 el1_trap:
        /*
         * x0: ESR_EC
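
For readability, the decode logic that the patched assembly implements can be restated
in C. This is an illustrative sketch only: the real code above runs in the EL2 vectors
with just x0/x1 spilled to the stack, and the helper names (guest_x0, set_guest_x0,
return_to_guest, handle_host_hvc) are hypothetical stand-ins for the register and stack
manipulation done in assembly.

    /* Illustrative C-level view of the EL2 el1_sync decode path above. */
    static void el1_sync_decode(unsigned long esr)
    {
            unsigned long ec = esr >> ESR_ELx_EC_SHIFT;

            /* cmp + ccmp + b.ne: anything but HVC64/HVC32 goes to el1_trap */
            if (ec != ESR_ELx_EC_HVC64 && ec != ESR_ELx_EC_HVC32) {
                    el1_trap();
                    return;
            }

            /* mrs/cbnz: a non-zero VTTBR_EL2 means the HVC came from a guest */
            if (read_sysreg(vttbr_el2)) {
                    /*
                     * el1_hvc_guest: the eor/cbnz pair is an equality test
                     * on the guest's x0 against the workaround function ID.
                     */
                    if (guest_x0() == ARM_SMCCC_ARCH_WORKAROUND_1) {
                            /*
                             * The workaround has already been applied on the
                             * host, so return SMCCC success (0) and eret
                             * straight back to the guest.
                             */
                            set_guest_x0(0);
                            return_to_guest();
                            return;
                    }
                    el1_trap();
                    return;
            }

            /* Otherwise the host itself issued the HVC. */
            handle_host_hvc();
    }

The point of placing the check this early is that nothing beyond the two registers
already saved by the vector stub has to be spilled or restored, which is what makes
this the "fastest possible path" referred to in the el1_hvc_guest comment.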