From 04e8cc79cd3b9bb6d30ae32c36204d1fbd133b1e Mon Sep 17 00:00:00 2001
From: Juergen Ributzka
Date: Tue, 22 Jul 2014 21:42:55 +0000
Subject: [PATCH] [RuntimeDyld][MachO][AArch64] Add a helper function for encoding addends in instructions.

Factor out the addend encoding into a helper function and simplify
processRelocationRef. Also add a few simple rtdyld tests. More tests to come
once GOTs can be tested too.

Related to

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@213689 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h | 185 ++++++++++++---------
 .../RuntimeDyld/AArch64/MachO_ARM64_relocations.s |  54 ++++++
 .../RuntimeDyld/AArch64/lit.local.cfg             |   3 +
 3 files changed, 167 insertions(+), 75 deletions(-)
 create mode 100644 test/ExecutionEngine/RuntimeDyld/AArch64/MachO_ARM64_relocations.s
 create mode 100644 test/ExecutionEngine/RuntimeDyld/AArch64/lit.local.cfg

diff --git a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
index 486cb56dc11..7043ba36f42 100644
--- a/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
+++ b/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -118,6 +118,109 @@ public:
     return Addend;
   }

+  /// Encode the addend in the instruction.
+  void encodeAddend(uint8_t *LocalAddress, uint32_t RelType,
+                    int64_t Addend) const {
+    // Verify that the relocation has the correct alignment.
+    switch (RelType) {
+    default:
+      llvm_unreachable("Unsupported relocation type!");
+    case MachO::ARM64_RELOC_UNSIGNED:
+      llvm_unreachable("Invalid relocation type for instruction.");
+    case MachO::ARM64_RELOC_BRANCH26:
+    case MachO::ARM64_RELOC_PAGE21:
+    case MachO::ARM64_RELOC_PAGEOFF12:
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+             "Instruction address is not aligned to 4 bytes.");
+      break;
+    }
+
+    switch (RelType) {
+    default:
+      llvm_unreachable("Unsupported relocation type!");
+    case MachO::ARM64_RELOC_BRANCH26: {
+      // Verify that the relocation points to the expected branch instruction.
+      uint32_t *p = (uint32_t *)LocalAddress;
+      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
+
+      // Verify addend value.
+      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
+      assert(isInt<28>(Addend) && "Branch target is out of range.");
+
+      // Encode the addend as 26 bit immediate in the branch instruction.
+      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
+      break;
+    }
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+    case MachO::ARM64_RELOC_PAGE21: {
+      // Verify that the relocation points to the expected adrp instruction.
+      uint32_t *p = (uint32_t *)LocalAddress;
+      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+      // Check that the addend fits into 21 bits (+ 12 lower bits).
+      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
+      assert(isInt<33>(Addend) && "Invalid page reloc value.");
+
+      // Encode the addend into the instruction.
+      uint32_t ImmLoValue = (uint32_t)(Addend << 17) & 0x60000000;
+      uint32_t ImmHiValue = (uint32_t)(Addend >> 9) & 0x00FFFFE0;
+      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+      break;
+    }
+    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+      // Verify that the relocation points to one of the expected load / store
+      // instructions.
+      uint32_t *p = (uint32_t *)LocalAddress;
+      assert((*p & 0x3B000000) == 0x39000000 &&
+             "Only expected load / store instructions.");
+    } // fall-through
+    case MachO::ARM64_RELOC_PAGEOFF12: {
+      // Verify that the relocation points to one of the expected load / store
+      // or add / sub instructions.
+      uint32_t *p = (uint32_t *)LocalAddress;
+      assert((((*p & 0x3B000000) == 0x39000000) ||
+              ((*p & 0x11C00000) == 0x11000000)) &&
+             "Expected load / store or add/sub instruction.");
+
+      // Check which instruction we are decoding to obtain the implicit shift
+      // factor of the instruction and verify alignment.
+      int ImplicitShift = 0;
+      if ((*p & 0x3B000000) == 0x39000000) { // load / store
+        // For load / store instructions the size is encoded in bits 31:30.
+        ImplicitShift = ((*p >> 30) & 0x3);
+        switch (ImplicitShift) {
+        case 0:
+          // Check if this is a vector op to get the correct shift value.
+          if ((*p & 0x04800000) == 0x04800000) {
+            ImplicitShift = 4;
+            assert(((Addend & 0xF) == 0) &&
+                   "128-bit LDR/STR not 16-byte aligned.");
+          }
+          break;
+        case 1:
+          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+          break;
+        case 2:
+          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+          break;
+        case 3:
+          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+          break;
+        }
+      }
+      // Compensate for implicit shift.
+      Addend >>= ImplicitShift;
+      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
+
+      // Encode the addend into the instruction.
+      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
+      break;
+    }
+    }
+  }
+
   relocation_iterator
   processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                        ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
@@ -196,105 +299,37 @@ public:
     }
     case MachO::ARM64_RELOC_BRANCH26: {
       assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
-      // Mask the value into the target address. We know instructions are
-      // 32-bit aligned, so we can do it all at once.
-      uint32_t *p = (uint32_t *)LocalAddress;
-      // Check if the addend is encoded in the instruction.
-      uint32_t EncodedAddend = *p & 0x03FFFFFF;
-      if (EncodedAddend != 0) {
-        if (RE.Addend == 0)
-          llvm_unreachable("branch26 instruction has embedded addend.");
-        else
-          llvm_unreachable("branch26 instruction has embedded addend and"
-                           "ARM64_RELOC_ADDEND.");
-      }
       // Check if branch is in range.
       uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
-      uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
-      assert(isInt<26>(PCRelVal) && "Branch target out of range!");
-      // Insert the value into the instruction.
-      *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
+      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
+      encodeAddend(LocalAddress, RE.RelType, PCRelVal);
       break;
     }
     case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
     case MachO::ARM64_RELOC_PAGE21: {
       assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
-      // Mask the value into the target address. We know instructions are
-      // 32-bit aligned, so we can do it all at once.
-      uint32_t *p = (uint32_t *)LocalAddress;
-      // Check if the addend is encoded in the instruction.
-      uint32_t EncodedAddend =
-          ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
-      if (EncodedAddend != 0) {
-        if (RE.Addend == 0)
-          llvm_unreachable("adrp instruction has embedded addend.");
-        else
-          llvm_unreachable("adrp instruction has embedded addend and"
-                           "ARM64_RELOC_ADDEND.");
-      }
       // Adjust for PC-relative relocation and offset.
       uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
-      uint64_t PCRelVal =
-          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
-      // Check that the value fits into 21 bits (+ 12 lower bits).
-      assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
-      // Insert the value into the instruction.
-      uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
-      uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
-      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+      int64_t PCRelVal =
+          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+      encodeAddend(LocalAddress, RE.RelType, PCRelVal);
       break;
     }
     case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
     case MachO::ARM64_RELOC_PAGEOFF12: {
       assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
-      // Mask the value into the target address. We know instructions are
-      // 32-bit aligned, so we can do it all at once.
-      uint32_t *p = (uint32_t *)LocalAddress;
-      // Check if the addend is encoded in the instruction.
-      uint32_t EncodedAddend = *p & 0x003FFC00;
-      if (EncodedAddend != 0) {
-        if (RE.Addend == 0)
-          llvm_unreachable("adrp instruction has embedded addend.");
-        else
-          llvm_unreachable("adrp instruction has embedded addend and"
-                           "ARM64_RELOC_ADDEND.");
-      }
       // Add the offset from the symbol.
       Value += RE.Addend;
       // Mask out the page address and only use the lower 12 bits.
       Value &= 0xFFF;
-      // Check which instruction we are updating to obtain the implicit shift
-      // factor from LDR/STR instructions.
-      if (*p & 0x08000000) {
-        uint32_t ImplicitShift = ((*p >> 30) & 0x3);
-        switch (ImplicitShift) {
-        case 0:
-          // Check if this a vector op.
-          if ((*p & 0x04800000) == 0x04800000) {
-            ImplicitShift = 4;
-            assert(((Value & 0xF) == 0) &&
-                   "128-bit LDR/STR not 16-byte aligned.");
-          }
-          break;
-        case 1:
-          assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
-        case 2:
-          assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
-        case 3:
-          assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
-        }
-        // Compensate for implicit shift.
-        Value >>= ImplicitShift;
-      }
-      // Insert the value into the instruction.
-      *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
+      encodeAddend(LocalAddress, RE.RelType, Value);
       break;
     }
     case MachO::ARM64_RELOC_SUBTRACTOR:
     case MachO::ARM64_RELOC_POINTER_TO_GOT:
     case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
     case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
-      llvm_unreachable("Relocation type not implemented yet!");
+      llvm_unreachable("Relocation type not yet implemented!");
     case MachO::ARM64_RELOC_ADDEND:
       llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                        "processRelocationRef!");
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/MachO_ARM64_relocations.s b/test/ExecutionEngine/RuntimeDyld/AArch64/MachO_ARM64_relocations.s
new file mode 100644
index 00000000000..510d9b05473
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/MachO_ARM64_relocations.s
@@ -0,0 +1,54 @@
+# RUN: llvm-mc -triple=arm64-apple-ios7.0.0 -code-model=small -relocation-model=pic -filetype=obj -o %t.o %s
+# RUN: llvm-rtdyld -triple=arm64-apple-ios7.0.0 -verify -check=%s %t.o
+# RUN: rm %t.o
+
+# FIXME: Add GOT relocation tests once GOT testing is supported.
+
+    .section __TEXT,__text,regular,pure_instructions
+    .ios_version_min 7, 0
+    .globl foo
+    .align 2
+foo:
+    movz w0, #0
+    ret
+
+    .globl _test_branch_reloc
+    .align 2
+
+
+# Test ARM64_RELOC_BRANCH26 relocation. The branch instruction only encodes 26
+# bits of the 28-bit possible branch range. The lower two bits are always zero
+# and therefore ignored.
+# rtdyld-check: decode_operand(br1, 0)[25:0] = (foo-br1)[27:2]
+_test_branch_reloc:
+br1:
+    b foo
+    ret
+
+
+# Test ARM64_RELOC_UNSIGNED relocation. The absolute 64-bit address of the
+# function should be stored at the 8-byte memory location.
+# rtdyld-check: *{8}ptr = foo
+    .section __DATA,__data
+    .globl ptr
+    .align 3
+    .fill 8192, 1, 0
+ptr:
+    .quad foo
+
+
+# Test ARM64_RELOC_PAGE21 and ARM64_RELOC_PAGEOFF12 relocations. adrp encodes
+# the PC-relative page (4 KiB) difference between the adrp instruction and the
+# variable ptr. ldr encodes the offset of the variable within the page. The ldr
+# instruction performs an implicit shift on the encoded immediate (imm<<3).
+# rtdyld-check: decode_operand(adrp1, 1) = (ptr[32:12]-adrp1[32:12])
+# rtdyld-check: decode_operand(ldr1, 2) = (ptr[11:3])
+    .globl _test_adrp_ldr
+    .align 2
+_test_adrp_ldr:
+adrp1:
+    adrp x0, ptr@PAGE
+ldr1:
+    ldr x0, [x0, ptr@PAGEOFF]
+    ret
+    .fill 8192, 1, 0
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/lit.local.cfg b/test/ExecutionEngine/RuntimeDyld/AArch64/lit.local.cfg
new file mode 100644
index 00000000000..cec29af5bbe
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'AArch64' in config.root.targets:
+    config.unsupported = True
+
--
2.11.0
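
The bit twiddling in the ARM64_RELOC_PAGE21 path of encodeAddend, which the adrp test above exercises, is easier to follow with the field layout spelled out. The following standalone sketch is not part of the patch; the helper name encodeAdrpDelta and the sample two-page delta are illustrative only. It applies the same masks and shifts as the patch to split a page-aligned, PC-relative byte delta into the adrp immediate fields immlo (bits 30:29) and immhi (bits 23:5).

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative sketch only: patch a page-aligned, PC-relative byte delta into
// an adrp instruction word the way encodeAddend() handles ARM64_RELOC_PAGE21.
static uint32_t encodeAdrpDelta(uint32_t Instr, int64_t Delta) {
  assert((Delta & 0xFFF) == 0 && "ADRP target is not page aligned.");
  uint32_t ImmLo = (uint32_t)(Delta << 17) & 0x60000000; // delta bits 13:12
  uint32_t ImmHi = (uint32_t)(Delta >> 9) & 0x00FFFFE0;  // delta bits 32:14
  return (Instr & 0x9F00001F) | ImmHi | ImmLo;
}

int main() {
  // adrp x0, <target> where the target page lies two pages (2 * 4096 bytes)
  // beyond the page containing the adrp instruction.
  uint32_t Patched = encodeAdrpDelta(0x90000000u, 2 * 4096);
  printf("patched adrp: 0x%08x\n", Patched); // immlo = 0b10 -> 0xd0000000
  return 0;
}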