From: Craig Topper
Date: Fri, 31 Jan 2020 05:23:05 +0000 (-0800)
Subject: [X86] Don't exit from foldOffsetIntoAddress if the Offset is 0, but AM.Disp is non...
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=d975910c50fb05b466f64beab1d43f8e8402bbdd;p=android-x86%2Fexternal-llvm-project.git

[X86] Don't exit from foldOffsetIntoAddress if the Offset is 0, but AM.Disp is non-zero.

This is an alternate fix for the issue D73606 was trying to solve.

The main issue here is that we bailed out of foldOffsetIntoAddress if
Offset is 0. But if we just found a symbolic displacement and AM.Disp
became non-zero earlier, we still need to validate AM.Disp against the
symbolic displacement.

This is my second attempt at committing this after failing the build
bots previously. One thing I realized about the previous attempt is
that it's possible for AM.Disp to already be non-zero and for the new
Offset to change it back to zero. In that case my previous attempt
failed to update AM.Disp to zero. So this patch removes the early out
for 0 and appropriately handles the 0 case in each check, so we still
update AM.Disp at the end.
---

diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index ebaa7cf2c23..b174a0856b2 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1409,18 +1409,20 @@ static bool isDispSafeForFrameIndex(int64_t Val) {
 
 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                             X86ISelAddressMode &AM) {
-  // If there's no offset to fold, we don't need to do any work.
-  if (Offset == 0)
-    return false;
+  // We may have already matched a displacement and the caller just added the
+  // symbolic displacement. So we still need to do the checks even if Offset
+  // is zero.
+
+  int64_t Val = AM.Disp + Offset;
 
   // Cannot combine ExternalSymbol displacements with integer offsets.
-  if (AM.ES || AM.MCSym)
+  if (Val != 0 && (AM.ES || AM.MCSym))
     return true;
 
-  int64_t Val = AM.Disp + Offset;
   CodeModel::Model M = TM.getCodeModel();
   if (Subtarget->is64Bit()) {
-    if (!X86::isOffsetSuitableForCodeModel(Val, M,
+    if (Val != 0 &&
+        !X86::isOffsetSuitableForCodeModel(Val, M,
                                            AM.hasSymbolicDisplacement()))
       return true;
     // In addition to the checks required for a register base, check that
@@ -1581,24 +1583,13 @@ bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
   if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
       !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
     return false;
-
-  // Don't try commuting operands if the address is in the form of
-  // sym+disp(%rip). foldOffsetIntoAddress() currently does not know there is a
-  // symbolic displacement and would fold disp. If disp is just a bit smaller
-  // than 2**31, it can easily cause a relocation overflow.
-  bool NoCommutate = false;
-  if (AM.isRIPRelative() && AM.hasSymbolicDisplacement())
-    if (ConstantSDNode *Cst =
-            dyn_cast<ConstantSDNode>(Handle.getValue().getOperand(1)))
-      NoCommutate = Cst->getSExtValue() != 0;
-
   AM = Backup;
-  if (!NoCommutate) {
-    // Try again after commutating the operands.
-    if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth + 1) &&
-        !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
-      return false;
-  }
+
+  // Try again after commutating the operands.
+  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
+                               Depth + 1) &&
+      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
+    return false;
   AM = Backup;
 
   // If we couldn't fold both operands into the address at the same time,
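
To illustrate why foldOffsetIntoAddress has to run its checks even when
Offset is 0, here is a small standalone sketch. It is not part of the patch:
AddrMode, displacementFits and foldOffset are hypothetical stand-ins for
X86ISelAddressMode, X86::isOffsetSuitableForCodeModel and the real fold
routine, and the 16 MiB small-code-model bound is only an approximation of
the real check.

// Standalone sketch, not part of the patch. All names below are
// simplified stand-ins, not the real LLVM APIs.
#include <cstdint>
#include <iostream>

struct AddrMode {
  int64_t Disp = 0;       // displacement accumulated so far
  bool HasSymbol = false; // a symbolic (e.g. RIP-relative global) displacement
};

// Rough small-code-model rule: the folded value must fit in a signed 32-bit
// immediate, and with a symbol attached it must also stay well below 2^31 so
// that sym+disp cannot overflow a 32-bit relocation (approximated here as a
// 16 MiB bound).
static bool displacementFits(int64_t Val, bool HasSymbol) {
  if (Val < INT32_MIN || Val > INT32_MAX)
    return false;
  return !HasSymbol || Val < 16 * 1024 * 1024;
}

// Mirrors the shape of the patched logic: compute the combined value first
// and validate it even when Offset itself is zero. Returns true on failure,
// like the real routine.
static bool foldOffset(int64_t Offset, AddrMode &AM) {
  int64_t Val = AM.Disp + Offset;
  if (Val != 0 && !displacementFits(Val, AM.HasSymbol))
    return true;
  AM.Disp = Val; // also resets Disp to 0 when Offset cancels it out
  return false;
}

int main() {
  AddrMode AM;
  AM.Disp = 0x7fff0000;  // large constant displacement matched earlier
  AM.HasSymbol = true;   // the caller has just attached the symbol
  // With the old Offset == 0 early return, this combination was never
  // re-checked; here it is correctly rejected.
  std::cout << (foldOffset(/*Offset=*/0, AM) ? "rejected\n" : "folded\n");
  return 0;
}

In this toy model, the large displacement matched before the symbol is
re-validated once the symbol is attached and the fold is rejected; with the
old early return it would have been accepted, which is the relocation
overflow scenario that the NoCommutate workaround removed above was guarding
against in matchAdd.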