From 50d2be30be90f7ab1fd602b377356afbee06b22d Mon Sep 17 00:00:00 2001
From: Nikita Popov
Date: Tue, 16 Apr 2019 18:55:16 +0000
Subject: [PATCH] [IR] Add WithOverflowInst class

This adds a WithOverflowInst class with a few helper methods to get the
underlying binop, signedness and nowrap type, and makes use of it where
sensible. There will be two more uses in D60650/D60656.

The refactorings are all NFC, though I left some TODOs where things could
be improved. In particular, we have two places where add/sub are handled
but mul isn't.

Differential Revision: https://reviews.llvm.org/D60668

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@358512 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Analysis/ValueTracking.h              |  7 +-
 include/llvm/IR/IntrinsicInst.h                    | 33 +++++++++
 lib/Analysis/ScalarEvolution.cpp                   | 57 ++++-----------
 lib/Analysis/ValueTracking.cpp                     | 15 +---
 lib/IR/IntrinsicInst.cpp                           | 34 +++++++++
 .../InstCombine/InstructionCombining.cpp           | 65 +++++------------
 .../Scalar/CorrelatedValuePropagation.cpp          | 85 ++++++++--------------
 lib/Transforms/Scalar/GVN.cpp                      | 39 +++-------
 lib/Transforms/Scalar/NewGVN.cpp                   | 40 ++--------
 9 files changed, 154 insertions(+), 221 deletions(-)

diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 11d2f513931..d380ad5527a 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -32,6 +32,7 @@ class DataLayout;
 class DominatorTree;
 class GEPOperator;
 class IntrinsicInst;
+class WithOverflowInst;
 struct KnownBits;
 class Loop;
 class LoopInfo;
@@ -454,10 +455,10 @@ class Value;
                                        const Instruction *CxtI,
                                        const DominatorTree *DT);
 
-  /// Returns true if the arithmetic part of the \p II 's result is
+  /// Returns true if the arithmetic part of the \p WO 's result is
   /// used only along the paths control dependent on the computation
-  /// not overflowing, \p II being an <op>.with.overflow intrinsic.
-  bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+  /// not overflowing, \p WO being an <op>.with.overflow intrinsic.
+  bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                  const DominatorTree &DT);
 
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 62bb4fa61d7..438cf90ec8d 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -265,6 +265,39 @@ namespace llvm {
     }
   };
 
+  /// This class represents an op.with.overflow intrinsic.
+  class WithOverflowInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::uadd_with_overflow:
+      case Intrinsic::sadd_with_overflow:
+      case Intrinsic::usub_with_overflow:
+      case Intrinsic::ssub_with_overflow:
+      case Intrinsic::umul_with_overflow:
+      case Intrinsic::smul_with_overflow:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); }
+    Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); }
+
+    /// Returns the binary operation underlying the intrinsic.
+    Instruction::BinaryOps getBinaryOp() const;
+
+    /// Whether the intrinsic is signed or unsigned.
+    bool isSigned() const;
+
+    /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
+    unsigned getNoWrapKind() const;
+  };
+
   /// Common base class for all memory intrinsics. Simply provides
   /// common methods.
   /// Written as CRTP to avoid a common base class amongst the
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 350eb3d8a7b..01b4019917d 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -4575,52 +4575,21 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
       break;
 
-    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
-    if (!CI)
+    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
+    if (!WO)
       break;
 
-    if (auto *F = CI->getCalledFunction())
-      switch (F->getIntrinsicID()) {
-      case Intrinsic::sadd_with_overflow:
-      case Intrinsic::uadd_with_overflow:
-        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
-          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
-                          CI->getArgOperand(1));
-
-        // Now that we know that all uses of the arithmetic-result component of
-        // CI are guarded by the overflow check, we can go ahead and pretend
-        // that the arithmetic is non-overflowing.
-        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
-          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ true,
-                          /* IsNUW = */ false);
-        else
-          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ false,
-                          /* IsNUW*/ true);
-      case Intrinsic::ssub_with_overflow:
-      case Intrinsic::usub_with_overflow:
-        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
-          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
-                          CI->getArgOperand(1));
-
-        // The same reasoning as sadd/uadd above.
-        if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
-          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ true,
-                          /* IsNUW = */ false);
-        else
-          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ false,
-                          /* IsNUW = */ true);
-      case Intrinsic::smul_with_overflow:
-      case Intrinsic::umul_with_overflow:
-        return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
-                        CI->getArgOperand(1));
-      default:
-        break;
-      }
-    break;
+    Instruction::BinaryOps BinOp = WO->getBinaryOp();
+    bool Signed = WO->isSigned();
+    // TODO: Should add nuw/nsw flags for mul as well.
+    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
+      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
+
+    // Now that we know that all uses of the arithmetic-result component of
+    // WO are guarded by the overflow check, we can go ahead and pretend
+    // that the arithmetic is non-overflowing.
+    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
+                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
   }
   default:
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 4bb4d307995..9c7b0fad5a8 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -4206,23 +4206,12 @@ OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
 }
 
-bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                      const DominatorTree &DT) {
-#ifndef NDEBUG
-  auto IID = II->getIntrinsicID();
-  assert((IID == Intrinsic::sadd_with_overflow ||
-          IID == Intrinsic::uadd_with_overflow ||
-          IID == Intrinsic::ssub_with_overflow ||
-          IID == Intrinsic::usub_with_overflow ||
-          IID == Intrinsic::smul_with_overflow ||
-          IID == Intrinsic::umul_with_overflow) &&
-         "Not an overflow intrinsic!");
-#endif
-
   SmallVector<const BranchInst *, 2> GuardingBranches;
   SmallVector<const ExtractValueInst *, 2> Results;
 
-  for (const User *U : II->users()) {
+  for (const User *U : WO->users()) {
     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
 
diff --git a/lib/IR/IntrinsicInst.cpp b/lib/IR/IntrinsicInst.cpp
index 8d371aabb21..2e2da0edb08 100644
--- a/lib/IR/IntrinsicInst.cpp
+++ b/lib/IR/IntrinsicInst.cpp
@@ -21,6 +21,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DebugInfoMetadata.h"
@@ -168,3 +169,36 @@ bool ConstrainedFPIntrinsic::isTernaryOp() const {
   }
 }
 
+Instruction::BinaryOps WithOverflowInst::getBinaryOp() const {
+  switch (getIntrinsicID()) {
+  case Intrinsic::uadd_with_overflow:
+  case Intrinsic::sadd_with_overflow:
+    return Instruction::Add;
+  case Intrinsic::usub_with_overflow:
+  case Intrinsic::ssub_with_overflow:
+    return Instruction::Sub;
+  case Intrinsic::umul_with_overflow:
+  case Intrinsic::smul_with_overflow:
+    return Instruction::Mul;
+  default:
+    llvm_unreachable("Invalid intrinsic");
+  }
+}
+
+bool WithOverflowInst::isSigned() const {
+  switch (getIntrinsicID()) {
+  case Intrinsic::sadd_with_overflow:
+  case Intrinsic::ssub_with_overflow:
+  case Intrinsic::smul_with_overflow:
+    return true;
+  default:
+    return false;
+  }
+}
+
+unsigned WithOverflowInst::getNoWrapKind() const {
+  if (isSigned())
+    return OverflowingBinaryOperator::NoSignedWrap;
+  else
+    return OverflowingBinaryOperator::NoUnsignedWrap;
+}
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 7e26dee3452..963542c50d8 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2667,53 +2667,28 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                       makeArrayRef(exti, exte));
   }
-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
-    // We're extracting from an intrinsic, see if we're the only user, which
-    // allows us to simplify multiple result intrinsics to simpler things that
-    // just get one value.
-    if (II->hasOneUse()) {
-      // Check if we're grabbing the overflow bit or the result of a 'with
-      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
+  if (WithOverflowInst *WO = dyn_cast<WithOverflowInst>(Agg)) {
+    // We're extracting from an overflow intrinsic, see if we're the only user,
+    // which allows us to simplify multiple result intrinsics to simpler
+    // things that just get one value.
+    if (WO->hasOneUse()) {
+      // Check if we're grabbing only the result of a 'with overflow' intrinsic
       // and replace it with a traditional binary instruction.
-      switch (II->getIntrinsicID()) {
-      case Intrinsic::uadd_with_overflow:
-      case Intrinsic::sadd_with_overflow:
-        if (*EV.idx_begin() == 0) {  // Normal result.
-          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
-          eraseInstFromFunction(*II);
-          return BinaryOperator::CreateAdd(LHS, RHS);
-        }
-
-        // If the normal result of the add is dead, and the RHS is a constant,
-        // we can transform this into a range comparison.
-        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
-        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
-          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
-            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
-                                ConstantExpr::getNot(CI));
-        break;
-      case Intrinsic::usub_with_overflow:
-      case Intrinsic::ssub_with_overflow:
-        if (*EV.idx_begin() == 0) {  // Normal result.
-          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
-          eraseInstFromFunction(*II);
-          return BinaryOperator::CreateSub(LHS, RHS);
-        }
-        break;
-      case Intrinsic::umul_with_overflow:
-      case Intrinsic::smul_with_overflow:
-        if (*EV.idx_begin() == 0) {  // Normal result.
-          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
-          eraseInstFromFunction(*II);
-          return BinaryOperator::CreateMul(LHS, RHS);
-        }
-        break;
-      default:
-        break;
+      if (*EV.idx_begin() == 0) {
+        Instruction::BinaryOps BinOp = WO->getBinaryOp();
+        Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
+        replaceInstUsesWith(*WO, UndefValue::get(WO->getType()));
+        eraseInstFromFunction(*WO);
+        return BinaryOperator::Create(BinOp, LHS, RHS);
       }
+
+      // If the normal result of the add is dead, and the RHS is a constant,
+      // we can transform this into a range comparison.
+      // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
+      if (WO->getIntrinsicID() == Intrinsic::uadd_with_overflow)
+        if (ConstantInt *CI = dyn_cast<ConstantInt>(WO->getRHS()))
+          return new ICmpInst(ICmpInst::ICMP_UGT, WO->getLHS(),
+                              ConstantExpr::getNot(CI));
     }
   }
   if (LoadInst *L = dyn_cast<LoadInst>(Agg))
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index e9af934fbb6..bc4daf1c9c1 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -399,59 +399,38 @@ static bool processSwitch(SwitchInst *SI, LazyValueInfo *LVI,
 }
 
 // See if we can prove that the given overflow intrinsic will not overflow.
-static bool willNotOverflow(IntrinsicInst *II, LazyValueInfo *LVI) {
-  using OBO = OverflowingBinaryOperator;
-  auto NoWrap = [&] (Instruction::BinaryOps BinOp, unsigned NoWrapKind) {
-    Value *RHS = II->getOperand(1);
-    ConstantRange RRange = LVI->getConstantRange(RHS, II->getParent(), II);
-    ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
-        BinOp, RRange, NoWrapKind);
-    // As an optimization, do not compute LRange if we do not need it.
-    if (NWRegion.isEmptySet())
-      return false;
-    Value *LHS = II->getOperand(0);
-    ConstantRange LRange = LVI->getConstantRange(LHS, II->getParent(), II);
-    return NWRegion.contains(LRange);
-  };
-  switch (II->getIntrinsicID()) {
-  default:
-    break;
-  case Intrinsic::uadd_with_overflow:
-    return NoWrap(Instruction::Add, OBO::NoUnsignedWrap);
-  case Intrinsic::sadd_with_overflow:
-    return NoWrap(Instruction::Add, OBO::NoSignedWrap);
-  case Intrinsic::usub_with_overflow:
-    return NoWrap(Instruction::Sub, OBO::NoUnsignedWrap);
-  case Intrinsic::ssub_with_overflow:
-    return NoWrap(Instruction::Sub, OBO::NoSignedWrap);
-  }
-  return false;
+static bool willNotOverflow(WithOverflowInst *WO, LazyValueInfo *LVI) {
+  // TODO: Also support multiplication.
+  Instruction::BinaryOps BinOp = WO->getBinaryOp();
+  if (BinOp == Instruction::Mul)
+    return false;
+
+  Value *RHS = WO->getRHS();
+  ConstantRange RRange = LVI->getConstantRange(RHS, WO->getParent(), WO);
+  ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
+      BinOp, RRange, WO->getNoWrapKind());
+  // As an optimization, do not compute LRange if we do not need it.
+  if (NWRegion.isEmptySet())
+    return false;
+  Value *LHS = WO->getLHS();
+  ConstantRange LRange = LVI->getConstantRange(LHS, WO->getParent(), WO);
+  return NWRegion.contains(LRange);
 }
 
-static void processOverflowIntrinsic(IntrinsicInst *II) {
-  IRBuilder<> B(II);
-  Value *NewOp = nullptr;
-  switch (II->getIntrinsicID()) {
-  default:
-    llvm_unreachable("Unexpected instruction.");
-  case Intrinsic::uadd_with_overflow:
-    NewOp = B.CreateNUWAdd(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  case Intrinsic::sadd_with_overflow:
-    NewOp = B.CreateNSWAdd(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  case Intrinsic::usub_with_overflow:
-    NewOp = B.CreateNUWSub(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  case Intrinsic::ssub_with_overflow:
-    NewOp = B.CreateNSWSub(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  }
+static void processOverflowIntrinsic(WithOverflowInst *WO) {
+  IRBuilder<> B(WO);
+  Value *NewOp = B.CreateBinOp(
+      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), WO->getName());
+  if (WO->isSigned())
+    cast<Instruction>(NewOp)->setHasNoSignedWrap();
+  else
+    cast<Instruction>(NewOp)->setHasNoUnsignedWrap();
+
+  Value *NewI = B.CreateInsertValue(UndefValue::get(WO->getType()), NewOp, 0);
+  NewI = B.CreateInsertValue(NewI, ConstantInt::getFalse(WO->getContext()), 1);
+  WO->replaceAllUsesWith(NewI);
+  WO->eraseFromParent();
   ++NumOverflows;
-  Value *NewI = B.CreateInsertValue(UndefValue::get(II->getType()), NewOp, 0);
-  NewI = B.CreateInsertValue(NewI, ConstantInt::getFalse(II->getContext()), 1);
-  II->replaceAllUsesWith(NewI);
-  II->eraseFromParent();
 }
 
 /// Infer nonnull attributes for the arguments at the specified callsite.
@@ -459,9 +438,9 @@ static bool processCallSite(CallSite CS, LazyValueInfo *LVI) {
   SmallVector<unsigned, 4> ArgNos;
   unsigned ArgNo = 0;
 
-  if (auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
-    if (willNotOverflow(II, LVI)) {
-      processOverflowIntrinsic(II);
+  if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
+    if (willNotOverflow(WO, LVI)) {
+      processOverflowIntrinsic(WO);
       return true;
     }
   }
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index a02f32f5643..985941eb3c4 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -329,36 +329,15 @@ GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
   e.type = EI->getType();
   e.opcode = 0;
 
-  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
-  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
-    // EI might be an extract from one of our recognised intrinsics. If it
-    // is we'll synthesize a semantically equivalent expression instead on
-    // an extract value expression.
-    switch (I->getIntrinsicID()) {
-    case Intrinsic::sadd_with_overflow:
-    case Intrinsic::uadd_with_overflow:
-      e.opcode = Instruction::Add;
-      break;
-    case Intrinsic::ssub_with_overflow:
-    case Intrinsic::usub_with_overflow:
-      e.opcode = Instruction::Sub;
-      break;
-    case Intrinsic::smul_with_overflow:
-    case Intrinsic::umul_with_overflow:
-      e.opcode = Instruction::Mul;
-      break;
-    default:
-      break;
-    }
-
-    if (e.opcode != 0) {
-      // Intrinsic recognized. Grab its args to finish building the expression.
-      assert(I->getNumArgOperands() == 2 &&
-             "Expect two args for recognised intrinsics.");
-      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
-      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
-      return e;
-    }
+  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
+  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
+    // EI is an extract from one of our with.overflow intrinsics. Synthesize
+    // a semantically equivalent expression instead of an extract value
+    // expression.
+    e.opcode = WO->getBinaryOp();
+    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
+    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
+    return e;
   }
 
   // Not a recognised intrinsic. Fall back to producing an extract value
diff --git a/lib/Transforms/Scalar/NewGVN.cpp b/lib/Transforms/Scalar/NewGVN.cpp
index 0474caf7c47..c54da4f72df 100644
--- a/lib/Transforms/Scalar/NewGVN.cpp
+++ b/lib/Transforms/Scalar/NewGVN.cpp
@@ -1814,39 +1814,13 @@ NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
 const Expression *
 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
   if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
-    auto *II = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
-    if (II && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
-      unsigned Opcode = 0;
-      // EI might be an extract from one of our recognised intrinsics. If it
-      // is we'll synthesize a semantically equivalent expression instead on
-      // an extract value expression.
-      switch (II->getIntrinsicID()) {
-      case Intrinsic::sadd_with_overflow:
-      case Intrinsic::uadd_with_overflow:
-        Opcode = Instruction::Add;
-        break;
-      case Intrinsic::ssub_with_overflow:
-      case Intrinsic::usub_with_overflow:
-        Opcode = Instruction::Sub;
-        break;
-      case Intrinsic::smul_with_overflow:
-      case Intrinsic::umul_with_overflow:
-        Opcode = Instruction::Mul;
-        break;
-      default:
-        break;
-      }
-
-      if (Opcode != 0) {
-        // Intrinsic recognized. Grab its args to finish building the
-        // expression.
-        assert(II->getNumArgOperands() == 2 &&
-               "Expect two args for recognised intrinsics.");
-        return createBinaryExpression(Opcode, EI->getType(),
-                                      II->getArgOperand(0),
-                                      II->getArgOperand(1), I);
-      }
-    }
+    auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
+    if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0)
+      // EI is an extract from one of our with.overflow intrinsics. Synthesize
+      // a semantically equivalent expression instead of an extract value
+      // expression.
+      return createBinaryExpression(WO->getBinaryOp(), EI->getType(),
+                                    WO->getLHS(), WO->getRHS(), I);
   }
 
   return createAggregateValueExpression(I);
-- 
2.11.0
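
Usage sketch (illustrative only, not part of the patch): the helper below shows
how the new WithOverflowInst accessors compose when folding the arithmetic half
of an extractvalue into a plain binary operator, in the same spirit as the
InstCombine and CorrelatedValuePropagation changes above. The function name
foldExtractOfOverflowResult is hypothetical, and the nuw/nsw flag is attached
only under the assumption that the caller has already proven the operation
cannot wrap (for example via isOverflowIntrinsicNoWrap or LazyValueInfo).

// Illustrative sketch, not part of the patch. Assumes the caller has already
// established that the <op>.with.overflow cannot actually overflow.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Hypothetical helper: rewrite `extractvalue {T, i1} %wo, 0` as a plain
// binary operator carrying the nowrap flag that matches the intrinsic.
static Value *foldExtractOfOverflowResult(ExtractValueInst *EVI) {
  // Only handle the arithmetic result (index 0), not the overflow bit.
  if (EVI->getNumIndices() != 1 || *EVI->idx_begin() != 0)
    return nullptr;

  auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
  if (!WO)
    return nullptr;

  IRBuilder<> B(EVI);
  Value *Bin = B.CreateBinOp(WO->getBinaryOp(), WO->getLHS(), WO->getRHS(),
                             WO->getName());
  // CreateBinOp may constant-fold, so only set flags on a real instruction.
  if (auto *BO = dyn_cast<BinaryOperator>(Bin)) {
    if (WO->getNoWrapKind() == OverflowingBinaryOperator::NoSignedWrap)
      BO->setHasNoSignedWrap();
    else
      BO->setHasNoUnsignedWrap();
  }
  return Bin;
}

The same accessors (getBinaryOp, getLHS/getRHS, isSigned and getNoWrapKind) are
what let the per-intrinsic switch statements above collapse into straight-line
code.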