From: Sanjay Patel <spatel@rotateright.com>
Date: Tue, 19 Jul 2016 22:09:34 +0000 (+0000)
Subject: [InstCombine] fold add(zext(xor X, C), C) --> sext X when C is INT_MIN in the source...
X-Git-Tag: android-x86-7.1-r4~29994
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=11faea381faa3e73d2532bde864df1936713be97;p=android-x86%2Fexternal-llvm.git

[InstCombine] fold add(zext(xor X, C), C) --> sext X when C is INT_MIN in the source type

The pattern may look more obviously like a sext if written as:

define i32 @g(i16 %x) {
  %zext = zext i16 %x to i32
  %xor = xor i32 %zext, 32768
  %add = add i32 %xor, -32768
  ret i32 %add
}

We already have that fold in visitAdd().

Differential Revision: https://reviews.llvm.org/D22477

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@276035 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 221a2200717..8cb124d26b9 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1047,6 +1047,16 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     // X + (signbit) --> X ^ signbit
     if (Val->isSignBit())
       return BinaryOperator::CreateXor(LHS, RHS);
+
+    // Is this add the last step in a convoluted sext?
+    Value *X;
+    const APInt *C;
+    if (match(LHS, m_ZExt(m_Xor(m_Value(X), m_APInt(C)))) &&
+        C->isMinSignedValue() &&
+        C->sext(LHS->getType()->getScalarSizeInBits()) == *Val) {
+      // add(zext(xor i16 X, -32768), -32768) --> sext X
+      return CastInst::Create(Instruction::SExt, X, LHS->getType());
+    }
   }
 
   // FIXME: Use the match above instead of dyn_cast to allow these transforms
diff --git a/test/Transforms/InstCombine/apint-add.ll b/test/Transforms/InstCombine/apint-add.ll
index f5b5bcb9d10..c55fd0419a6 100644
--- a/test/Transforms/InstCombine/apint-add.ll
+++ b/test/Transforms/InstCombine/apint-add.ll
@@ -56,9 +56,7 @@ define i49 @test4(i49 %x) {
 
 define i7 @sext(i4 %x) {
 ; CHECK-LABEL: @sext(
-; CHECK-NEXT:    [[XOR:%.*]] = xor i4 %x, -8
-; CHECK-NEXT:    [[ZEXT:%.*]] = zext i4 [[XOR]] to i7
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw i7 [[ZEXT]], -8
+; CHECK-NEXT:    [[ADD:%.*]] = sext i4 %x to i7
 ; CHECK-NEXT:    ret i7 [[ADD]]
 ;
   %xor = xor i4 %x, -8
@@ -69,9 +67,7 @@ define i7 @sext(i4 %x) {
 
 define <2 x i10> @sext_vec(<2 x i3> %x) {
 ; CHECK-LABEL: @sext_vec(
-; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i3> %x, <i3 -4, i3 -4>
-; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i3> [[XOR]] to <2 x i10>
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw <2 x i10> [[ZEXT]], <i10 -4, i10 -4>
+; CHECK-NEXT:    [[ADD:%.*]] = sext <2 x i3> %x to <2 x i10>
 ; CHECK-NEXT:    ret <2 x i10> [[ADD]]
 ;
   %xor = xor <2 x i3> %x, <i3 -4, i3 -4>
@@ -80,6 +76,27 @@ define <2 x i10> @sext_vec(<2 x i3> %x) {
   ret <2 x i10> %add
 }
 
+; Multiple uses of the operands don't prevent the fold.
+
+define i4 @sext_multiuse(i4 %x) {
+; CHECK-LABEL: @sext_multiuse(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i4 %x, -8
+; CHECK-NEXT:    [[ZEXT:%.*]] = zext i4 [[XOR]] to i7
+; CHECK-NEXT:    [[ADD:%.*]] = sext i4 %x to i7
+; CHECK-NEXT:    [[MUL:%.*]] = sdiv i7 [[ZEXT]], [[ADD]]
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i7 [[MUL]] to i4
+; CHECK-NEXT:    [[DIV:%.*]] = sdiv i4 [[TRUNC]], [[XOR]]
+; CHECK-NEXT:    ret i4 [[DIV]]
+;
+  %xor = xor i4 %x, -8
+  %zext = zext i4 %xor to i7
+  %add = add nsw i7 %zext, -8
+  %mul = sdiv i7 %zext, %add
+  %trunc = trunc i7 %mul to i4
+  %div = sdiv i4 %trunc, %xor
+  ret i4 %div
+}
+
 ; Tests for Integer BitWidth > 64 && BitWidth <= 1024.
 
 ;; Flip sign bit then add INT_MIN -> nop.
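
For reference, the motivating i16 example from the commit message now collapses to a single instruction: flipping the sign bit of the zero-extended value (xor with 32768) and then subtracting that bit's weight (add of -32768) is exactly two's-complement sign extension. A minimal standalone check, assuming an opt binary built from a tree that includes this revision (-instcombine is the legacy pass name of this era, and the CHECK lines are illustrative, written in the style of the updated test file):

; RUN: opt -instcombine -S < %s | FileCheck %s

define i32 @g(i16 %x) {
; CHECK-LABEL: @g(
; CHECK-NEXT:    [[ADD:%.*]] = sext i16 %x to i32
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %zext = zext i16 %x to i32
  %xor = xor i32 %zext, 32768   ; flip the sign bit of the original i16
  %add = add i32 %xor, -32768   ; subtract the sign bit's weight
  ret i32 %add
}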