OSDN Git Service

KVM: arm64: Add kimg_hyp_va() helper
author: Marc Zyngier <maz@kernel.org>
Wed, 21 Oct 2020 19:48:02 +0000 (20:48 +0100)
committer: Marc Zyngier <maz@kernel.org>
Mon, 9 Nov 2020 16:56:39 +0000 (16:56 +0000)
KVM/arm64 is so far unable to deal with function pointers, as the compiler
will generate the kernel's runtime VA, and not the linear mapping address,
meaning that kern_hyp_va() will give the wrong result.

We so far have been able to use PC-relative addressing, but that's not
always easy to use, and prevents the implementation of things like
the mapping of an index to a pointer.

To allow this, provide a new helper that computes the required
translation from the kernel image to the HYP VA space.

Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/va_layout.c

index 3313943..608c3a8 100644 (file)
@@ -98,6 +98,24 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)         ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+/*
+ * Translate a kernel image VA into a HYP VA.
+ *
+ * The movz/movk sequence below is a placeholder: at boot it is rewritten
+ * by the kvm_update_kimg_phys_offset() alternative callback so that
+ * 'offset' holds (kimage_voffset + PHYS_OFFSET). Subtracting that from a
+ * kernel image VA yields the address's offset into the linear map, and
+ * OR-ing in PAGE_OFFSET produces the linear-map alias, which
+ * __kern_hyp_va() can then turn into a HYP VA.
+ */
+static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
+{
+       unsigned long offset;
+
+       /* Patched at boot with a "mov offset, #kimage_voffset + PHYS_OFFSET" */
+       asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+                                   "movk %0, #0, lsl #16\n"
+                                   "movk %0, #0, lsl #32\n"
+                                   "movk %0, #0, lsl #48\n",
+                                   kvm_update_kimg_phys_offset)
+                    : "=r" (offset));
+
+       return __kern_hyp_va((v - offset) | PAGE_OFFSET);
+}
+
+/* Convert a pointer-to-function-pointer into a callable HYP VA. */
+#define kimg_fn_hyp_va(v)      ((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
+
+/*
+ * Wrap a function symbol so it is passed around as a pointer to a
+ * function pointer, for later translation via kimg_fn_hyp_va().
+ */
+#define kimg_fn_ptr(x) (typeof(x) **)(x)
+
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
index e0404bc..1d00d2c 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/kvm_mmu.h>
+#include <asm/memory.h>
 
 /*
  * The LSB of the HYP VA tag
@@ -201,3 +202,52 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
                                           AARCH64_INSN_BRANCH_NOLINK);
        *updptr++ = cpu_to_le32(insn);
 }
+
+/*
+ * Rewrite a 4-instruction alternative slot with a movz/movk sequence
+ * that loads the 64-bit immediate 'val' into the destination register
+ * used by the original (placeholder) instruction.
+ *
+ * @val:     64-bit constant to materialise
+ * @origptr: original instructions (used only to recover the Rd register)
+ * @updptr:  where the patched instructions are written
+ * @nr_inst: number of instructions in the slot; must be exactly 4
+ */
+static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       u32 insn, oinsn, rd;
+
+       BUG_ON(nr_inst != 4);
+
+       /* Compute target register */
+       oinsn = le32_to_cpu(*origptr);
+       rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+       /* movz rd, #(val & 0xffff) */
+       insn = aarch64_insn_gen_movewide(rd,
+                                        (u16)val,
+                                        0,
+                                        AARCH64_INSN_VARIANT_64BIT,
+                                        AARCH64_INSN_MOVEWIDE_ZERO);
+       *updptr++ = cpu_to_le32(insn);
+
+       /* movk rd, #((val >> 16) & 0xffff), lsl #16 */
+       insn = aarch64_insn_gen_movewide(rd,
+                                        (u16)(val >> 16),
+                                        16,
+                                        AARCH64_INSN_VARIANT_64BIT,
+                                        AARCH64_INSN_MOVEWIDE_KEEP);
+       *updptr++ = cpu_to_le32(insn);
+
+       /* movk rd, #((val >> 32) & 0xffff), lsl #32 */
+       insn = aarch64_insn_gen_movewide(rd,
+                                        (u16)(val >> 32),
+                                        32,
+                                        AARCH64_INSN_VARIANT_64BIT,
+                                        AARCH64_INSN_MOVEWIDE_KEEP);
+       *updptr++ = cpu_to_le32(insn);
+
+       /* movk rd, #((val >> 48) & 0xffff), lsl #48 */
+       insn = aarch64_insn_gen_movewide(rd,
+                                        (u16)(val >> 48),
+                                        48,
+                                        AARCH64_INSN_VARIANT_64BIT,
+                                        AARCH64_INSN_MOVEWIDE_KEEP);
+       *updptr++ = cpu_to_le32(insn);
+}
+
+/*
+ * Alternative callback backing __kimg_hyp_va(): patches the placeholder
+ * movz/movk slot so that it loads (kimage_voffset + PHYS_OFFSET), the
+ * constant used to convert a kernel image VA into a linear-map offset.
+ */
+void kvm_update_kimg_phys_offset(struct alt_instr *alt,
+                                __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+       generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
+}