From 6e5a6506b1b6657b5a6f042caada486d19cf06a5 Mon Sep 17 00:00:00 2001
From: Chandler Carruth
Date: Wed, 7 Feb 2018 23:59:14 +0000
Subject: [PATCH] [x86] Fix nasty bug in the x86 backend that is essentially
 impossible to hit from IR but creates a minefield for MI passes.

The x86 backend has fairly powerful logic to try and fold loads that
feed register operands to instructions into a memory operand on the
instruction. This is almost always a good thing, but there are specific
relocated loads that are only allowed to appear in specific
instructions. Notably, R_X86_64_GOTTPOFF is only allowed in `movq` and
`addq`. This patch blocks folding of memory operands using this
relocation unless the target is in fact `addq`.

The particular relocation indicates why we simply don't hit this under
normal circumstances. This relocation is only used for TLS, and it gets
used in very specific ways in conjunction with %fs-relative addressing.
The result is that loads using this relocation are essentially never
eligible for folding into an instruction's memory operands. Unless, of
course, you have an MI pass that inserts usage of such a load. I have
exactly such an MI pass and was greeted by truly mysterious miscompiles
where the linker replaced my instruction with a completely garbage byte
sequence. Go team.

This is the only such relocation I'm aware of in x86, but there may be
others that need to be similarly restricted.

Fixes PR36165.

Differential Revision: https://reviews.llvm.org/D42732

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@324546 91177308-0d34-0410-b5e6-96231b3b80d8
---
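The FIXME in the X86InstrInfo.cpp hunk anticipates other relocations
that only support specific instructions. If more such cases accumulate,
the check could plausibly be factored into a predicate along the lines
sketched here. This helper is not code from the patch; its name and any
cases beyond MO_GOTTPOFF are hypothetical, and it assumes the X86II and
X86 enums from the X86 backend headers are in scope:

  // Hypothetical helper (not in this patch): whether a load whose
  // displacement operand carries `TargetFlags` may legally be folded
  // into an instruction with `Opcode`.
  static bool isRelocationSafeToFold(unsigned TargetFlags, unsigned Opcode) {
    switch (TargetFlags) {
    case X86II::MO_GOTTPOFF:
      // The linker can only relax R_X86_64_GOTTPOFF (initial-exec TLS)
      // when it sits on `movq` or `addq`, so the only legal fold target
      // here is ADD64rr; the unfolded load itself is the `movq` form.
      return Opcode == X86::ADD64rr;
    default:
      // No known instruction restriction for other relocations.
      return true;
    }
  }

This mirrors the in-line condition added below: the fold is rejected
whenever the displacement carries MO_GOTTPOFF and the consuming
instruction is anything other than ADD64rr.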
 lib/Target/X86/X86InstrInfo.cpp   |  8 ++++
 test/CodeGen/X86/bad-tls-fold.mir | 77 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 85 insertions(+)
 create mode 100644 test/CodeGen/X86/bad-tls-fold.mir

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 19e9f30c91d..ab6ea1e06bc 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -8535,6 +8535,14 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
       MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
     return nullptr;
 
+  // GOTTPOFF relocation loads can only be folded into add instructions.
+  // FIXME: Need to exclude other relocations that only support specific
+  // instructions.
+  if (MOs.size() == X86::AddrNumOperands &&
+      MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
+      MI.getOpcode() != X86::ADD64rr)
+    return nullptr;
+
   MachineInstr *NewMI = nullptr;
 
   // Attempt to fold any custom cases we have.
diff --git a/test/CodeGen/X86/bad-tls-fold.mir b/test/CodeGen/X86/bad-tls-fold.mir
new file mode 100644
index 00000000000..550d0db3cb0
--- /dev/null
+++ b/test/CodeGen/X86/bad-tls-fold.mir
@@ -0,0 +1,77 @@
+# RUN: llc -x mir < %s | FileCheck %s
+--- |
+  target triple = "x86_64-unknown-linux-gnu"
+
+  @x = external global i64
+  @i = external thread_local global i32
+
+  define i32 @or() {
+  entry:
+    ret i32 undef
+  }
+
+  define i32 @and() {
+  entry:
+    ret i32 undef
+  }
+...
+---
+# CHECK-LABEL: or:
+name: or
+alignment: 4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gr64 }
+  - { id: 1, class: gr64 }
+  - { id: 2, class: gr64 }
+  - { id: 3, class: gr64 }
+  - { id: 4, class: gr32 }
+body: |
+  bb.0.entry:
+    %0:gr64 = MOV64rm $rip, 1, $noreg, @x, $noreg :: (load 8)
+    %1:gr64 = OR64ri8 %0, 7, implicit-def dead $eflags
+    %2:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gottpoff) @i, $noreg :: (load 8)
+    %3:gr64 = OR64rr %2, %1, implicit-def dead $eflags
+    %4:gr32 = MOV32rm killed %3, 1, $noreg, 0, $fs :: (load 4)
+    ; CHECK-NOT: orq {{.*}}GOTTPOFF{{.*}}
+    ;
+    ; What we actually expect:
+    ; CHECK: movq {{.*}}GOTTPOFF{{.*}}, %[[R:.*]]
+    ; CHECK-NEXT: orq %{{.*}}, %[[R]]
+    ; CHECK-NEXT: movl %fs:(%[[R]]),
+    ;
+    ; CHECK-NOT: orq {{.*}}GOTTPOFF{{.*}}
+    $eax = COPY %4
+    RET 0, $eax
+
+...
+---
+# CHECK-LABEL: and:
+name: and
+alignment: 4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gr64 }
+  - { id: 1, class: gr64 }
+  - { id: 2, class: gr64 }
+  - { id: 3, class: gr64 }
+  - { id: 4, class: gr32 }
+body: |
+  bb.0.entry:
+    %0:gr64 = MOV64rm $rip, 1, $noreg, @x, $noreg :: (load 8)
+    %1:gr64 = OR64ri8 %0, 7, implicit-def dead $eflags
+    %2:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gottpoff) @i, $noreg :: (load 8)
+    %3:gr64 = AND64rr %2, %1, implicit-def dead $eflags
+    %4:gr32 = MOV32rm killed %3, 1, $noreg, 0, $fs :: (load 4)
+    ; CHECK-NOT: andq {{.*}}GOTTPOFF{{.*}}
+    ;
+    ; What we actually expect:
+    ; CHECK: movq {{.*}}GOTTPOFF{{.*}}, %[[R:.*]]
+    ; CHECK-NEXT: andq %{{.*}}, %[[R]]
+    ; CHECK-NEXT: movl %fs:(%[[R]]),
+    ;
+    ; CHECK-NOT: andq {{.*}}GOTTPOFF{{.*}}
+    $eax = COPY %4
+    RET 0, $eax
+
+...
-- 
2.11.0
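For context on how an MI pass ends up with such a load in the first
place (the scenario the commit message describes), here is a minimal
sketch. All surrounding names (`MBB`, `I`, `DL`, `TII`, `MRI`, and the
thread-local GlobalValue `GV`) are assumed to be supplied by a
hypothetical pass, and MachineMemOperands are elided for brevity:

  // movq GV@GOTTPOFF(%rip), %Tmp -- the load carrying R_X86_64_GOTTPOFF.
  unsigned Tmp = MRI.createVirtualRegister(&X86::GR64RegClass);
  BuildMI(MBB, I, DL, TII->get(X86::MOV64rm), Tmp)
      .addReg(X86::RIP)                            // base
      .addImm(1)                                   // scale
      .addReg(0)                                   // index
      .addGlobalAddress(GV, 0, X86II::MO_GOTTPOFF) // displacement
      .addReg(0);                                  // segment
  // movl %fs:(%Tmp), %Dst -- the actual thread-local read.
  unsigned Dst = MRI.createVirtualRegister(&X86::GR32RegClass);
  BuildMI(MBB, I, DL, TII->get(X86::MOV32rm), Dst)
      .addReg(Tmp).addImm(1).addReg(0).addImm(0).addReg(X86::FS);
  // If a later use of %Tmp were folded back into a memory operand (say,
  // into an `orq`), the relocation would no longer sit on a relaxable
  // `movq`/`addq` -- exactly the miscompile this patch prevents.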