#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
-#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
-#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
"addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
cl::desc("Allow combining of ScaledReg field in Address sinking."));
+static cl::opt<bool>
+ EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
+ cl::init(true),
+ cl::desc("Enable splitting large offset of GEP."));
+
namespace {
+enum ExtType {
+ ZeroExtension, // Zero extension has been seen.
+ SignExtension, // Sign extension has been seen.
+ BothExtension // This extension type is used if we saw sext after
+ // ZeroExtension had been set, or if we saw zext after
+ // SignExtension had been set. It makes the type
+ // information of a promoted instruction invalid.
+};
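+// Illustrative example (not from the patch): if %x is first promoted through
+// a sext, it is recorded as SignExtension; if the same %x is later promoted
+// through a zext, its entry degrades to BothExtension and the recorded
+// original type can no longer be used.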
+
using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
-using TypeIsSExt = PointerIntPair<Type *, 1, bool>;
+using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;
/// Keep track of sext chains based on their initial value.
DenseMap<Value *, Instruction *> SeenChainsForSExt;
+  /// Keep track of GEPs accessing the same data structure, such as a struct
+  /// or an array, that are candidates to be split later because of their
+  /// large offsets.
+ DenseMap<
+ AssertingVH<Value>,
+ SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
+ LargeOffsetGEPMap;
+
+  /// Keep track of new GEP bases after splitting the GEPs with large offsets.
+ SmallSet<AssertingVH<Value>, 2> NewGEPBases;
+
+  /// Map large offset GEPs to their serial numbers.
+ DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
+
/// Keep track of SExt promoted.
ValueToSExts ValToSExtendedUses;
bool isPreheader);
bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
bool optimizeInst(Instruction *I, bool &ModifiedDT);
- bool optimizeMemoryInst(Instruction *I, Value *Addr,
- Type *AccessTy, unsigned AS);
+ bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
+ Type *AccessTy, unsigned AddrSpace);
bool optimizeInlineAsmInst(CallInst *CS);
bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
bool optimizeExt(Instruction *&I);
bool optimizeExtUses(Instruction *I);
- bool optimizeLoadExt(LoadInst *I);
+ bool optimizeLoadExt(LoadInst *Load);
bool optimizeSelectInst(SelectInst *SI);
- bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
- bool optimizeSwitchInst(SwitchInst *CI);
+ bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
+ bool optimizeSwitchInst(SwitchInst *SI);
bool optimizeExtractElementInst(Instruction *Inst);
bool dupRetToEnableTailCallOpts(BasicBlock *BB);
bool placeDbgValues(Function &F);
SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
unsigned CreatedInstsCost = 0);
bool mergeSExts(Function &F);
+ bool splitLargeGEPOffsets();
bool performAddressTypePromotion(
Instruction *&Inst,
bool AllowPromotionWithoutCommonHeader,
SeenChainsForSExt.clear();
ValToSExtendedUses.clear();
RemovedInsts.clear();
+ LargeOffsetGEPMap.clear();
+ LargeOffsetGEPID.clear();
for (Function::iterator I = F.begin(); I != F.end(); ) {
BasicBlock *BB = &*I++;
bool ModifiedDTOnIteration = false;
}
if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
MadeChange |= mergeSExts(F);
+ if (!LargeOffsetGEPMap.empty())
+ MadeChange |= splitLargeGEPOffsets();
// Really free removed instructions during promotion.
for (Instruction *I : RemovedInsts)
if (!DisableBranchOpts) {
MadeChange = false;
- SmallPtrSet<BasicBlock*, 8> WorkList;
+ // Use a set vector to get deterministic iteration order. The order the
+ // blocks are removed may affect whether or not PHI nodes in successors
+ // are removed.
+ SmallSetVector<BasicBlock*, 8> WorkList;
for (BasicBlock &BB : F) {
SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
MadeChange |= ConstantFoldTerminator(&BB, true);
// Delete the dead blocks and any of their dead successors.
MadeChange |= !WorkList.empty();
while (!WorkList.empty()) {
- BasicBlock *BB = *WorkList.begin();
- WorkList.erase(BB);
+ BasicBlock *BB = WorkList.pop_back_val();
SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
DeleteDeadBlock(BB);
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
bool Changed = false;
// Scan all of the blocks in the function, except for the entry block.
- for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
- BasicBlock *BB = &*I++;
+  // Use a temporary array to avoid the iterator being invalidated when
+  // deleting blocks.
+ SmallVector<WeakTrackingVH, 16> Blocks;
+ for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
+ Blocks.push_back(&Block);
+
+ for (auto &Block : Blocks) {
+ auto *BB = cast_or_null<BasicBlock>(Block);
+ if (!BB)
+ continue;
// If the destination block has a single pred, then this is a trivial
// edge, just collapse it.
BasicBlock *SinglePred = BB->getSinglePredecessor();
BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
if (Term && !Term->isConditional()) {
Changed = true;
- DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
- // Remember if SinglePred was the entry block of the function.
- // If so, we will need to move BB back to the entry position.
- bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
- MergeBasicBlockIntoOnlyPred(BB, nullptr);
-
- if (isEntry && BB != &BB->getParent()->getEntryBlock())
- BB->moveBefore(&BB->getParent()->getEntryBlock());
+ LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
- // We have erased a block. Update the iterator.
- I = BB->getIterator();
+ // Merge BB into SinglePred and delete it.
+ MergeBlockIntoPredecessor(BB);
}
}
return Changed;
}
bool MadeChange = false;
+ // Copy blocks into a temporary array to avoid iterator invalidation issues
+ // as we remove them.
// Note that this intentionally skips the entry block.
- for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
- BasicBlock *BB = &*I++;
+ SmallVector<WeakTrackingVH, 16> Blocks;
+ for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
+ Blocks.push_back(&Block);
+
+ for (auto &Block : Blocks) {
+ BasicBlock *BB = cast_or_null<BasicBlock>(Block);
+ if (!BB)
+ continue;
BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
if (!DestBB ||
!isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
BranchInst *BI = cast<BranchInst>(BB->getTerminator());
BasicBlock *DestBB = BI->getSuccessor(0);
- DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
+ LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
+ << *BB << *DestBB);
// If the destination block has a single pred, then this is a trivial edge,
// just collapse it.
if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
if (SinglePred != DestBB) {
- // Remember if SinglePred was the entry block of the function. If so, we
- // will need to move BB back to the entry position.
- bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
- MergeBasicBlockIntoOnlyPred(DestBB, nullptr);
-
- if (isEntry && BB != &BB->getParent()->getEntryBlock())
- BB->moveBefore(&BB->getParent()->getEntryBlock());
-
- DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
+ assert(SinglePred == BB &&
+ "Single predecessor not the same as predecessor");
+ // Merge DestBB into SinglePred/BB and delete it.
+ MergeBlockIntoPredecessor(DestBB);
+ // Note: BB(=SinglePred) will not be deleted on this path.
+ // DestBB(=its single successor) is the one that was deleted.
+ LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
return;
}
}
BB->eraseFromParent();
++NumBlocksElim;
- DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
+ LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
// Computes a map of base pointer relocation instructions to corresponding
assert(InsertPt != UserBB->end());
InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
CI->getType(), "", &*InsertPt);
+ InsertedCast->setDebugLoc(CI->getDebugLoc());
}
// Replace a use of the cast with a use of the new cast.
if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
return false;
- DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
- DEBUG(AndI->getParent()->dump());
+ LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
+ LLVM_DEBUG(AndI->getParent()->dump());
// Push the 'and' into the same block as the icmp 0. There should only be
// one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
// Preincrement use iterator so we don't invalidate it.
++UI;
- DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
+ LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
// Keep the 'and' in the same place if the use is already in the same block.
Instruction *InsertPt =
// Replace a use of the 'and' with a use of the new 'and'.
TheUse = InsertedAnd;
++NumAndUses;
- DEBUG(User->getParent()->dump());
+ LLVM_DEBUG(User->getParent()->dump());
}
// We removed all uses, nuke the and.
/// %x.extract.shift.1 = lshr i64 %arg1, 32
/// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
///
-/// CodeGen will recoginze the pattern in BB2 and generate BitExtract
+/// CodeGen will recognize the pattern in BB2 and generate BitExtract
/// instruction.
/// Return true if any changes are made.
static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
// cmp i16 trunc.result, opnd2
//
if (isa<TruncInst>(User) && shiftIsLegal
- // If the type of the truncate is legal, no trucate will be
+ // If the type of the truncate is legal, no truncate will be
// introduced in other basic blocks.
&&
(!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
InsertedInsts.insert(ExtVal);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
+ case Intrinsic::strip_invariant_group:
II->replaceAllUsesWith(II->getArgOperand(0));
II->eraseFromParent();
return true;
namespace {
-/// \brief This class provides transaction based operation on the IR.
+/// This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
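+/// A typical use, as driven later in this file (illustrative sketch;
+/// NotProfitable stands in for a real profitability check):
+/// \code
+///   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
+///       TPT.getRestorationPoint();
+///   TPT.mutateType(ExtOpnd, Ext->getType()); // ...more recorded actions...
+///   if (NotProfitable)
+///     TPT.rollback(LastKnownGood); // undo everything since LastKnownGood
+/// \endcode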
class TypePromotionTransaction {
- /// \brief This represents the common interface of the individual transaction.
+ /// This represents the common interface of the individual transaction.
/// Each class implements the logic for doing one specific modification on
/// the IR via the TypePromotionTransaction.
class TypePromotionAction {
Instruction *Inst;
public:
- /// \brief Constructor of the action.
+ /// Constructor of the action.
/// The constructor performs the related action on the IR.
TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
virtual ~TypePromotionAction() = default;
- /// \brief Undo the modification done by this action.
+ /// Undo the modification done by this action.
/// When this method is called, the IR must be in the same state as it was
/// before this action was applied.
/// \pre Undoing the action works if and only if the IR is in the exact same
/// state as it was directly after this action was applied.
virtual void undo() = 0;
- /// \brief Advocate every change made by this action.
+ /// Advocate every change made by this action.
/// When the results on the IR of the action are to be kept, it is important
/// to call this function, otherwise hidden information may be kept forever.
virtual void commit() {
}
};
- /// \brief Utility to remember the position of an instruction.
+ /// Utility to remember the position of an instruction.
class InsertionHandler {
/// Position of an instruction.
/// Either an instruction:
/// - Is the first in a basic block: BB is used.
- /// - Has a previous instructon: PrevInst is used.
+ /// - Has a previous instruction: PrevInst is used.
union {
Instruction *PrevInst;
BasicBlock *BB;
bool HasPrevInstruction;
public:
- /// \brief Record the position of \p Inst.
+ /// Record the position of \p Inst.
InsertionHandler(Instruction *Inst) {
BasicBlock::iterator It = Inst->getIterator();
HasPrevInstruction = (It != (Inst->getParent()->begin()));
Point.BB = Inst->getParent();
}
- /// \brief Insert \p Inst at the recorded position.
+ /// Insert \p Inst at the recorded position.
void insert(Instruction *Inst) {
if (HasPrevInstruction) {
if (Inst->getParent())
}
};
- /// \brief Move an instruction before another.
+ /// Move an instruction before another.
class InstructionMoveBefore : public TypePromotionAction {
/// Original position of the instruction.
InsertionHandler Position;
public:
- /// \brief Move \p Inst before \p Before.
+ /// Move \p Inst before \p Before.
InstructionMoveBefore(Instruction *Inst, Instruction *Before)
: TypePromotionAction(Inst), Position(Inst) {
- DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
+ LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
+ << "\n");
Inst->moveBefore(Before);
}
- /// \brief Move the instruction back to its original position.
+ /// Move the instruction back to its original position.
void undo() override {
- DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
Position.insert(Inst);
}
};
- /// \brief Set the operand of an instruction with a new value.
+ /// Set the operand of an instruction with a new value.
class OperandSetter : public TypePromotionAction {
/// Original operand of the instruction.
Value *Origin;
unsigned Idx;
public:
- /// \brief Set \p Idx operand of \p Inst with \p NewVal.
+ /// Set \p Idx operand of \p Inst with \p NewVal.
OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
: TypePromotionAction(Inst), Idx(Idx) {
- DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
- << "for:" << *Inst << "\n"
- << "with:" << *NewVal << "\n");
+ LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
+ << "for:" << *Inst << "\n"
+ << "with:" << *NewVal << "\n");
Origin = Inst->getOperand(Idx);
Inst->setOperand(Idx, NewVal);
}
- /// \brief Restore the original value of the instruction.
+ /// Restore the original value of the instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
- << "for: " << *Inst << "\n"
- << "with: " << *Origin << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
+ << "for: " << *Inst << "\n"
+ << "with: " << *Origin << "\n");
Inst->setOperand(Idx, Origin);
}
};
- /// \brief Hide the operands of an instruction.
+ /// Hide the operands of an instruction.
/// Do as if this instruction was not using any of its operands.
class OperandsHider : public TypePromotionAction {
/// The list of original operands.
SmallVector<Value *, 4> OriginalValues;
public:
- /// \brief Remove \p Inst from the uses of the operands of \p Inst.
+ /// Remove \p Inst from the uses of the operands of \p Inst.
OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
- DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
unsigned NumOpnds = Inst->getNumOperands();
OriginalValues.reserve(NumOpnds);
for (unsigned It = 0; It < NumOpnds; ++It) {
}
}
- /// \brief Restore the original list of uses.
+ /// Restore the original list of uses.
void undo() override {
- DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
Inst->setOperand(It, OriginalValues[It]);
}
};
- /// \brief Build a truncate instruction.
+ /// Build a truncate instruction.
class TruncBuilder : public TypePromotionAction {
Value *Val;
public:
- /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
+ /// Build a truncate instruction of \p Opnd producing a \p Ty
/// result.
/// trunc Opnd to Ty.
TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
IRBuilder<> Builder(Opnd);
Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
- DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
}
- /// \brief Get the built value.
+ /// Get the built value.
Value *getBuiltValue() { return Val; }
- /// \brief Remove the built instruction.
+ /// Remove the built instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
};
- /// \brief Build a sign extension instruction.
+ /// Build a sign extension instruction.
class SExtBuilder : public TypePromotionAction {
Value *Val;
public:
- /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
+ /// Build a sign extension instruction of \p Opnd producing a \p Ty
/// result.
/// sext Opnd to Ty.
SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
: TypePromotionAction(InsertPt) {
IRBuilder<> Builder(InsertPt);
Val = Builder.CreateSExt(Opnd, Ty, "promoted");
- DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
}
- /// \brief Get the built value.
+ /// Get the built value.
Value *getBuiltValue() { return Val; }
- /// \brief Remove the built instruction.
+ /// Remove the built instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
};
- /// \brief Build a zero extension instruction.
+ /// Build a zero extension instruction.
class ZExtBuilder : public TypePromotionAction {
Value *Val;
public:
- /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
+ /// Build a zero extension instruction of \p Opnd producing a \p Ty
/// result.
/// zext Opnd to Ty.
ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
: TypePromotionAction(InsertPt) {
IRBuilder<> Builder(InsertPt);
Val = Builder.CreateZExt(Opnd, Ty, "promoted");
- DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
}
- /// \brief Get the built value.
+ /// Get the built value.
Value *getBuiltValue() { return Val; }
- /// \brief Remove the built instruction.
+ /// Remove the built instruction.
void undo() override {
- DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
if (Instruction *IVal = dyn_cast<Instruction>(Val))
IVal->eraseFromParent();
}
};
- /// \brief Mutate an instruction to another type.
+ /// Mutate an instruction to another type.
class TypeMutator : public TypePromotionAction {
/// Record the original type.
Type *OrigTy;
public:
- /// \brief Mutate the type of \p Inst into \p NewTy.
+ /// Mutate the type of \p Inst into \p NewTy.
TypeMutator(Instruction *Inst, Type *NewTy)
: TypePromotionAction(Inst), OrigTy(Inst->getType()) {
- DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
- << "\n");
+ LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
+ << "\n");
Inst->mutateType(NewTy);
}
- /// \brief Mutate the instruction back to its original type.
+ /// Mutate the instruction back to its original type.
void undo() override {
- DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
- << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
+ << "\n");
Inst->mutateType(OrigTy);
}
};
- /// \brief Replace the uses of an instruction by another instruction.
+ /// Replace the uses of an instruction by another instruction.
class UsesReplacer : public TypePromotionAction {
/// Helper structure to keep track of the replaced uses.
struct InstructionAndIdx {
using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
public:
- /// \brief Replace all the use of \p Inst by \p New.
+ /// Replace all the use of \p Inst by \p New.
UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
- DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
- << "\n");
+ LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
+ << "\n");
// Record the original uses.
for (Use &U : Inst->uses()) {
Instruction *UserI = cast<Instruction>(U.getUser());
Inst->replaceAllUsesWith(New);
}
- /// \brief Reassign the original uses of Inst to Inst.
+ /// Reassign the original uses of Inst to Inst.
void undo() override {
- DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
for (use_iterator UseIt = OriginalUses.begin(),
EndIt = OriginalUses.end();
UseIt != EndIt; ++UseIt) {
}
};
- /// \brief Remove an instruction from the IR.
+ /// Remove an instruction from the IR.
class InstructionRemover : public TypePromotionAction {
/// Original position of the instruction.
InsertionHandler Inserter;
SetOfInstrs &RemovedInsts;
public:
- /// \brief Remove all reference of \p Inst and optinally replace all its
+  /// Remove all references to \p Inst and optionally replace all its
/// uses with New.
/// \p RemovedInsts Keep track of the instructions removed by this Action.
/// \pre If !Inst->use_empty(), then New != nullptr
RemovedInsts(RemovedInsts) {
if (New)
Replacer = new UsesReplacer(Inst, New);
- DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
RemovedInsts.insert(Inst);
/// The instructions removed here will be freed after completing
/// optimizeBlock() for all blocks as we need to keep track of the
~InstructionRemover() override { delete Replacer; }
- /// \brief Resurrect the instruction and reassign it to the proper uses if
+ /// Resurrect the instruction and reassign it to the proper uses if
  /// a new value was provided when this action was built.
void undo() override {
- DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
+ LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
Inserter.insert(Inst);
if (Replacer)
Replacer->undo();
namespace {
-/// \brief A helper class for matching addressing modes.
+/// A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
/// The ongoing transaction where every action should be registered.
TypePromotionTransaction &TPT;
+  // A GEP which has too large an offset to be folded into the addressing mode.
+ std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
+
/// This is set to true when we should not do profitability checks.
/// When true, IsProfitableToFoldIntoAddressingMode always returns true.
bool IgnoreProfitability;
- AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
- const TargetLowering &TLI,
- const TargetRegisterInfo &TRI,
- Type *AT, unsigned AS,
- Instruction *MI, ExtAddrMode &AM,
- const SetOfInstrs &InsertedInsts,
- InstrToOrigTy &PromotedInsts,
- TypePromotionTransaction &TPT)
+ AddressingModeMatcher(
+ SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
+ const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI,
+ ExtAddrMode &AM, const SetOfInstrs &InsertedInsts,
+ InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
+ std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP)
: AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
- PromotedInsts(PromotedInsts), TPT(TPT) {
+ PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP) {
IgnoreProfitability = false;
}
/// optimizations.
/// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
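+  /// \p LargeOffsetGEP is an output parameter: when matching sees a GEP whose
+  /// constant offset may not fit the r+i addressing mode, the (GEP, offset)
+  /// pair is recorded there as a candidate for splitLargeGEPOffsets().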
- static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
- Instruction *MemoryInst,
- SmallVectorImpl<Instruction*> &AddrModeInsts,
- const TargetLowering &TLI,
- const TargetRegisterInfo &TRI,
- const SetOfInstrs &InsertedInsts,
- InstrToOrigTy &PromotedInsts,
- TypePromotionTransaction &TPT) {
+ static ExtAddrMode
+ Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
+ SmallVectorImpl<Instruction *> &AddrModeInsts,
+ const TargetLowering &TLI, const TargetRegisterInfo &TRI,
+ const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
+ TypePromotionTransaction &TPT,
+ std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) {
ExtAddrMode Result;
- bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI,
- AccessTy, AS,
+ bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS,
MemoryInst, Result, InsertedInsts,
- PromotedInsts, TPT).matchAddr(V, 0);
+ PromotedInsts, TPT, LargeOffsetGEP)
+ .matchAddr(V, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
return Result;
}
private:
bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
- bool matchAddr(Value *V, unsigned Depth);
- bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
+ bool matchAddr(Value *Addr, unsigned Depth);
+ bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
bool *MovedAway = nullptr);
bool isProfitableToFoldIntoAddressingMode(Instruction *I,
ExtAddrMode &AMBefore,
Value *PromotedOperand) const;
};
-/// \brief Keep track of simplification of Phi nodes.
+/// Keep track of simplification of Phi nodes.
/// Accept the set of all phi nodes and erase a phi node from this set
/// if it is simplified.
class SimplificationTracker {
}
};
-/// \brief A helper class for combining addressing modes.
+/// A helper class for combining addressing modes.
class AddressingModeCombiner {
typedef std::pair<Value *, BasicBlock *> ValueInBB;
typedef DenseMap<ValueInBB, Value *> FoldAddrToValueMapping;
AddressingModeCombiner(const SimplifyQuery &_SQ, ValueInBB OriginalValue)
: CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}
- /// \brief Get the combined AddrMode
+ /// Get the combined AddrMode
const ExtAddrMode &getAddrMode() const {
return AddrModes[0];
}
- /// \brief Add a new AddrMode if it's compatible with the AddrModes we already
+ /// Add a new AddrMode if it's compatible with the AddrModes we already
/// have.
/// \return True iff we succeeded in doing so.
bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
return CanHandle;
}
- /// \brief Combine the addressing modes we've collected into a single
+ /// Combine the addressing modes we've collected into a single
/// addressing mode.
/// \return True iff we successfully combined them or we only had one so
/// didn't need to combine them anyway.
}
private:
- /// \brief Initialize Map with anchor values. For address seen in some BB
+  /// Initialize Map with anchor values. For an address seen in some BB
  /// we record the value of each field of that address.
  /// If the address is not an instruction then the basic block is set to null.
  /// At the same time we find a common type for the different fields we will
return true;
}
- /// \brief We have mapping between value A and basic block where value A
+ /// We have a mapping from a value A, and the basic block where A was
 /// seen, to another value B, where B was a field in the addressing mode represented
- /// by A. Also we have an original value C representin an address in some
+ /// by A. Also we have an original value C representing an address in some
 /// basic block. Traversing from C through phis and selects we ended up with
 /// the A's in the map. This utility function tries to find a value V which is a
 /// field in addressing mode C and traversing through phi nodes and selects
return Result;
}
- /// \brief Try to match PHI node to Candidate.
+ /// Try to match PHI node to Candidate.
/// Matcher tracks the matched Phi nodes.
bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
SmallSetVector<PHIPair, 8> &Matcher,
return true;
}
- /// \brief For the given set of PHI nodes (in the SimplificationTracker) try
+ /// For the given set of PHI nodes (in the SimplificationTracker) try
/// to find their equivalents.
/// Returns false if this matching fails and creation of new Phi is disabled.
bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
}
return true;
}
- /// \brief Fill the placeholder with values from predecessors and simplify it.
+ /// Fill the placeholder with values from predecessors and simplify it.
void FillPlaceholders(FoldAddrToValueMapping &Map,
SmallVectorImpl<ValueInBB> &TraverseOrder,
SimplificationTracker &ST) {
Instruction *CurrentI = cast<Instruction>(CurrentValue);
bool IsDefinedInThisBB = CurrentI->getParent() == CurrentBlock;
- unsigned PredCount =
- std::distance(pred_begin(CurrentBlock), pred_end(CurrentBlock));
+ unsigned PredCount = pred_size(CurrentBlock);
// if Current Value is not defined in this basic block we are interested
// in values in predecessors.
if (!IsDefinedInThisBB) {
// Don't touch identity bitcasts.
if (I->getType() == I->getOperand(0)->getType())
return false;
- return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
+ return I->getType()->isIntOrPtrTy();
case Instruction::PtrToInt:
// PtrToInt is always a noop, as we know that the int type is pointer sized.
return true;
}
}
-/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
+/// Check whether or not \p Val is a legal instruction for \p TLI.
/// \note \p Val is assumed to be the product of some type promotion.
/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
/// to be legal, as the non-promoted value would have had the same state.
namespace {
-/// \brief Hepler class to perform type promotion.
+/// Helper class to perform type promotion.
class TypePromotionHelper {
- /// \brief Utility function to check whether or not a sign or zero extension
+ /// Utility function to add a promoted instruction \p ExtOpnd to
+ /// \p PromotedInsts and record the type of extension we have seen.
+ static void addPromotedInst(InstrToOrigTy &PromotedInsts,
+ Instruction *ExtOpnd,
+ bool IsSExt) {
+ ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
+ InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
+ if (It != PromotedInsts.end()) {
+      // If the new extension is the same as the original, the information in
+ // PromotedInsts[ExtOpnd] is still correct.
+ if (It->second.getInt() == ExtTy)
+ return;
+
+      // Otherwise the new extension differs from the old one; invalidate the
+      // type information by setting the extension type to BothExtension.
+ ExtTy = BothExtension;
+ }
+ PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
+ }
+
+ /// Utility function to query the original type of instruction \p Opnd
+ /// with a matched extension type. If the extension doesn't match, we
+ /// cannot use the information we had on the original type.
+ /// BothExtension doesn't match any extension type.
+ static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
+ Instruction *Opnd,
+ bool IsSExt) {
+ ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
+ InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
+ if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
+ return It->second.getPointer();
+ return nullptr;
+ }
+
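+  // Illustrative sketch (not in the patch): only a query with the matching
+  // extension kind returns the recorded type.
+  //   addPromotedInst(PromotedInsts, Opnd, /*IsSExt=*/true);
+  //   getOrigType(PromotedInsts, Opnd, /*IsSExt=*/true);  // recorded type
+  //   getOrigType(PromotedInsts, Opnd, /*IsSExt=*/false); // nullptr
+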
+ /// Utility function to check whether or not a sign or zero extension
/// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
/// either using the operands of \p Inst or promoting \p Inst.
/// The type of the extension is defined by \p IsSExt.
static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
const InstrToOrigTy &PromotedInsts, bool IsSExt);
- /// \brief Utility function to determine if \p OpIdx should be promoted when
+ /// Utility function to determine if \p OpIdx should be promoted when
/// promoting \p Inst.
static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
return !(isa<SelectInst>(Inst) && OpIdx == 0);
}
- /// \brief Utility function to promote the operand of \p Ext when this
+ /// Utility function to promote the operand of \p Ext when this
/// operand is a promotable trunc or sext or zext.
/// \p PromotedInsts maps the instructions to their type before promotion.
/// \p CreatedInstsCost[out] contains the cost of all instructions
SmallVectorImpl<Instruction *> *Exts,
SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
- /// \brief Utility function to promote the operand of \p Ext when this
+ /// Utility function to promote the operand of \p Ext when this
/// operand is promotable and is not a supported trunc or sext.
/// \p PromotedInsts maps the instructions to their type before promotion.
/// \p CreatedInstsCost[out] contains the cost of all the instructions
SmallVectorImpl<Instruction *> *Truncs,
const TargetLowering &TLI);
- /// \brief Given a sign/zero extend instruction \p Ext, return the approriate
+ /// Given a sign/zero extend instruction \p Ext, return the appropriate
/// action to promote the operand of \p Ext instead of using Ext.
/// \return NULL if no promotable action is possible with the current
/// sign extension.
(IsSExt && BinOp->hasNoSignedWrap())))
return true;
+  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
+  // ext(or(opnd, cst))  --> or(ext(opnd), ext(cst))
+  // Both hold for sext and zext: these ops act bitwise, so they commute with
+  // replicating (sext) or zeroing (zext) the high bits.
+ if ((Inst->getOpcode() == Instruction::And ||
+ Inst->getOpcode() == Instruction::Or))
+ return true;
+
+ // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
+ if (Inst->getOpcode() == Instruction::Xor) {
+ const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
+ // Make sure it is not a NOT.
+ if (Cst && !Cst->getValue().isAllOnesValue())
+ return true;
+ }
+
+ // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
+ // It may change a poisoned value into a regular value, like
+ // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12
+  //              poisoned value                    regular value
+  // It should be OK since undef covers valid values.
+ if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
+ return true;
+
+ // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
+ // It may change a poisoned value into a regular value, like
+ // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
+  //             poisoned value                    regular value
+  // It should be OK since undef covers valid values.
+ if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
+ const Instruction *ExtInst =
+ dyn_cast<const Instruction>(*Inst->user_begin());
+    if (ExtInst && ExtInst->hasOneUse()) {
+ const Instruction *AndInst =
+ dyn_cast<const Instruction>(*ExtInst->user_begin());
+ if (AndInst && AndInst->getOpcode() == Instruction::And) {
+ const ConstantInt *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
+ if (Cst &&
+ Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
+ return true;
+ }
+ }
+ }
+
// Check if we can do the following simplification.
// ext(trunc(opnd)) --> ext(opnd)
if (!isa<TruncInst>(Inst))
// I.e., check that trunc just drops extended bits of the same kind of
// the extension.
// #1 get the type of the operand and check the kind of the extended bits.
- const Type *OpndType;
- InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
- if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
- OpndType = It->second.getPointer();
+ const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
+ if (OpndType)
+ ;
else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
OpndType = Opnd->getOperand(0)->getType();
else
// Remember the original type of the instruction before promotion.
// This is useful to know that the high bits are sign extended bits.
- PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
- ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
+ addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
// Step #1.
TPT.mutateType(ExtOpnd, Ext->getType());
// Step #2.
// Step #3.
Instruction *ExtForOpnd = Ext;
- DEBUG(dbgs() << "Propagate Ext to operands\n");
+ LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
++OpIdx) {
- DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
+ LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
!shouldExtOperand(ExtOpnd, OpIdx)) {
- DEBUG(dbgs() << "No need to propagate\n");
+ LLVM_DEBUG(dbgs() << "No need to propagate\n");
continue;
}
// Check if we can statically extend the operand.
Value *Opnd = ExtOpnd->getOperand(OpIdx);
if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
- DEBUG(dbgs() << "Statically extend\n");
+ LLVM_DEBUG(dbgs() << "Statically extend\n");
unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
: Cst->getValue().zext(BitWidth);
}
// UndefValue are typed, so we have to statically sign extend them.
if (isa<UndefValue>(Opnd)) {
- DEBUG(dbgs() << "Statically extend\n");
+ LLVM_DEBUG(dbgs() << "Statically extend\n");
TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
continue;
}
- // Otherwise we have to explicity sign extend the operand.
+ // Otherwise we have to explicitly sign extend the operand.
// Check if Ext was reused to extend an operand.
if (!ExtForOpnd) {
// If yes, create a new one.
- DEBUG(dbgs() << "More operands to ext\n");
+ LLVM_DEBUG(dbgs() << "More operands to ext\n");
Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
: TPT.createZExt(Ext, Opnd, Ext->getType());
if (!isa<Instruction>(ValForExtOpnd)) {
ExtForOpnd = nullptr;
}
if (ExtForOpnd == Ext) {
- DEBUG(dbgs() << "Extension is useless now\n");
+ LLVM_DEBUG(dbgs() << "Extension is useless now\n");
TPT.eraseInstruction(Ext);
}
return ExtOpnd;
/// \return True if the promotion is profitable, false otherwise.
bool AddressingModeMatcher::isPromotionProfitable(
unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
- DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
+ LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
+ << '\n');
// The cost of the new extensions is greater than the cost of the
// old extension plus what we folded.
// This is not profitable.
case Instruction::BitCast:
// BitCast is always a noop, and we can handle it as long as it is
// int->int or pointer->pointer (we don't want int<->fp or something).
- if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
- AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
+ if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
// Don't touch identity bitcasts. These were probably put here by LSR,
// and we don't want to mess around with them. Assume it knows what it
// is doing.
// Check to see if we can fold the base pointer in too.
if (matchAddr(AddrInst->getOperand(0), Depth+1))
return true;
+ } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
+ TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
+ ConstantOffset > 0) {
+ // Record GEPs with non-zero offsets as candidates for splitting in the
+ // event that the offset cannot fit into the r+i addressing mode.
+ // Simple and common case that only one GEP is used in calculating the
+ // address for the memory access.
+ Value *Base = AddrInst->getOperand(0);
+ auto *BaseI = dyn_cast<Instruction>(Base);
+ auto *GEP = cast<GetElementPtrInst>(AddrInst);
+ if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
+ (BaseI && !isa<CastInst>(BaseI) &&
+ !isa<GetElementPtrInst>(BaseI))) {
+ // If the base is an instruction, make sure the GEP is not in the same
+ // basic block as the base. If the base is an argument or global
+ // value, make sure the GEP is not in the entry block. Otherwise,
+ // instruction selection can undo the split. Also make sure the
+ // parent block allows inserting non-PHI instructions before the
+ // terminator.
+ BasicBlock *Parent =
+ BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
+ if (GEP->getParent() != Parent && !Parent->getTerminator()->isEHPad())
+ LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
+ }
}
AddrMode.BaseOffs -= ConstantOffset;
return false;
PromotedOperand)) {
AddrMode = BackupAddrMode;
AddrModeInsts.resize(OldSize);
- DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
+ LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
TPT.rollback(LastKnownGood);
return false;
}
// will tell us if the addressing mode for the memory operation will
// *actually* cover the shared instruction.
ExtAddrMode Result;
+ std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
+ 0);
TypePromotionTransaction::ConstRestorationPt LastKnownGood =
TPT.getRestorationPoint();
- AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
- AddressAccessTy, AS,
- MemoryInst, Result, InsertedInsts,
- PromotedInsts, TPT);
+ AddressingModeMatcher Matcher(
+ MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result,
+ InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.matchAddr(Address, 0);
(void)Success; assert(Success && "Couldn't select *anything*?");
// the result may differ depending on what other uses our candidate
// addressing instructions might have.
AddrModeInsts.clear();
+ std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
+ 0);
ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
- InsertedInsts, PromotedInsts, TPT);
- NewAddrMode.OriginalValue = V;
+ InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP);
+ GetElementPtrInst *GEP = LargeOffsetGEP.first;
+ if (GEP && GEP->getParent() != MemoryInst->getParent() &&
+ !NewGEPBases.count(GEP)) {
+ // If splitting the underlying data structure can reduce the offset of a
+ // GEP, collect the GEP. Skip the GEPs that are the new bases of
+ // previously split data structures.
+ LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
+ if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
+ LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
+ }
+
+ NewAddrMode.OriginalValue = V;
if (!AddrModes.addNewAddrMode(NewAddrMode))
break;
}
if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
return IsNonLocalValue(V, MemoryInst->getParent());
})) {
- DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
+ << "\n");
return false;
}
Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
if (SunkAddr) {
- DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
+ << " for " << *MemoryInst << "\n");
if (SunkAddr->getType() != Addr->getType())
SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
} else if (AddrSinkUsingGEPs ||
- (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
- SubtargetInfo->useAA())) {
+ (!AddrSinkUsingGEPs.getNumOccurrences() && TM && TTI->useAA())) {
// By default, we use the GEP-based method when AA is used later. This
// prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
- DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
+ << " for " << *MemoryInst << "\n");
Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *ResultPtr = nullptr, *ResultIndex = nullptr;
DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
return false;
- DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
- << *MemoryInst << "\n");
+ LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
+ << " for " << *MemoryInst << "\n");
Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
Value *Result = nullptr;
return MadeChange;
}
-/// \brief Check if all the uses of \p Val are equivalent (or free) zero or
+/// Check if all the uses of \p Val are equivalent (or free) zero or
/// sign extensions.
static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
assert(!Val->use_empty() && "Input must have at least one use");
return true;
}
-/// \brief Try to speculatively promote extensions in \p Exts and continue
+/// Try to speculatively promote extensions in \p Exts and continue
/// promoting through newly promoted operands recursively as far as doing so is
/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
/// When some promotion happened, \p TPT contains the proper state to revert
}
if (!DT.dominates(Pt, Inst))
// Give up if we need to merge in a common dominator as the
- // expermients show it is not profitable.
+ // experiments show it is not profitable.
continue;
Inst->replaceAllUsesWith(Pt);
RemovedInsts.insert(Inst);
return Changed;
}
+// Split large data structures so that the GEPs accessing them can have
+// smaller offsets, allowing them to be sunk to the same blocks as their users.
+// For example, a large struct starting at %base is split into two parts
+// where the second part starts from %new_base.
+//
+// Before:
+// BB0:
+// %base =
+//
+// BB1:
+// %gep0 = gep %base, off0
+// %gep1 = gep %base, off1
+// %gep2 = gep %base, off2
+//
+// BB2:
+// %load1 = load %gep0
+// %load2 = load %gep1
+// %load3 = load %gep2
+//
+// After:
+// BB0:
+// %base =
+// %new_base = gep %base, off0
+//
+// BB1:
+// %new_gep0 = %new_base
+// %new_gep1 = gep %new_base, off1 - off0
+// %new_gep2 = gep %new_base, off2 - off0
+//
+// BB2:
+// %load1 = load i32, i32* %new_gep0
+// %load2 = load i32, i32* %new_gep1
+// %load3 = load i32, i32* %new_gep2
+//
+// %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because
+// their offsets are small enough to fit into the addressing mode.
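+//
+// The new base (%new_base above) is remembered in NewGEPBases so that a later
+// round of address matching does not queue it for splitting again.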
+bool CodeGenPrepare::splitLargeGEPOffsets() {
+ bool Changed = false;
+ for (auto &Entry : LargeOffsetGEPMap) {
+ Value *OldBase = Entry.first;
+ SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
+ &LargeOffsetGEPs = Entry.second;
+ auto compareGEPOffset =
+ [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
+ const std::pair<GetElementPtrInst *, int64_t> &RHS) {
+ if (LHS.first == RHS.first)
+ return false;
+ if (LHS.second != RHS.second)
+ return LHS.second < RHS.second;
+ return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
+ };
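+    // Tie-breaking on the serial number (LargeOffsetGEPID) rather than on GEP
+    // pointer values keeps the sort order deterministic across runs.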
+    // Sort all the GEPs of the same data structure based on the offsets.
+ llvm::sort(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end(),
+ compareGEPOffset);
+ LargeOffsetGEPs.erase(
+ std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
+ LargeOffsetGEPs.end());
+ // Skip if all the GEPs have the same offsets.
+ if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
+ continue;
+ GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
+ int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
+ Value *NewBaseGEP = nullptr;
+
+ auto LargeOffsetGEP = LargeOffsetGEPs.begin();
+ while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
+ GetElementPtrInst *GEP = LargeOffsetGEP->first;
+ int64_t Offset = LargeOffsetGEP->second;
+ if (Offset != BaseOffset) {
+ TargetLowering::AddrMode AddrMode;
+ AddrMode.BaseOffs = Offset - BaseOffset;
+ // The result type of the GEP might not be the type of the memory
+ // access.
+ if (!TLI->isLegalAddressingMode(*DL, AddrMode,
+ GEP->getResultElementType(),
+ GEP->getAddressSpace())) {
+ // We need to create a new base if the offset to the current base is
+ // too large to fit into the addressing mode. So, a very large struct
+          // may be split into several parts.
+ BaseGEP = GEP;
+ BaseOffset = Offset;
+ NewBaseGEP = nullptr;
+ }
+ }
+
+ // Generate a new GEP to replace the current one.
+ IRBuilder<> Builder(GEP);
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
+ Type *I8PtrTy =
+ Builder.getInt8PtrTy(GEP->getType()->getPointerAddressSpace());
+ Type *I8Ty = Builder.getInt8Ty();
+
+ if (!NewBaseGEP) {
+ // Create a new base if we don't have one yet. Find the insertion
+ // pointer for the new base first.
+ BasicBlock::iterator NewBaseInsertPt;
+ BasicBlock *NewBaseInsertBB;
+ if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
+ // If the base of the struct is an instruction, the new base will be
+ // inserted close to it.
+ NewBaseInsertBB = BaseI->getParent();
+ if (isa<PHINode>(BaseI))
+ NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
+ else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
+ NewBaseInsertBB =
+ SplitEdge(NewBaseInsertBB, Invoke->getNormalDest());
+ NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
+ } else
+ NewBaseInsertPt = std::next(BaseI->getIterator());
+ } else {
+ // If the current base is an argument or global value, the new base
+ // will be inserted to the entry block.
+ NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
+ NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
+ }
+ IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
+ // Create a new base.
+ Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset);
+ NewBaseGEP = OldBase;
+ if (NewBaseGEP->getType() != I8PtrTy)
+ NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
+ NewBaseGEP =
+ NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep");
+ NewGEPBases.insert(NewBaseGEP);
+ }
+
+ Value *NewGEP = NewBaseGEP;
+ if (Offset == BaseOffset) {
+ if (GEP->getType() != I8PtrTy)
+ NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
+ } else {
+ // Calculate the new offset for the new GEP.
+ Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset);
+ NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index);
+
+ if (GEP->getType() != I8PtrTy)
+ NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType());
+ }
+ GEP->replaceAllUsesWith(NewGEP);
+ LargeOffsetGEPID.erase(GEP);
+ LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
+ GEP->eraseFromParent();
+ Changed = true;
+ }
+ }
+ return Changed;
+}
+
/// Return true, if an ext(load) can be formed from an extension in
/// \p MovedExts.
bool CodeGenPrepare::canFormExtLd(
// x = phi x1', x2'
// y = and x, 0xff
bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
- if (!Load->isSimple() ||
- !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy()))
+ if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
return false;
// Skip loads we've already transformed.
namespace {
-/// \brief Helper class to promote a scalar operation to a vector one.
+/// Helper class to promote a scalar operation to a vector one.
/// This class is used to move downward extractelement transition.
/// E.g.,
/// a = vector_op <2 x i32>
/// Instruction that will be combined with the transition.
Instruction *CombineInst = nullptr;
- /// \brief The instruction that represents the current end of the transition.
+ /// The instruction that represents the current end of the transition.
/// Since we are faking the promotion until we reach the end of the chain
/// of computation, we need a way to get the current end of the transition.
Instruction *getEndOfTransition() const {
return InstsToBePromoted.back();
}
- /// \brief Return the index of the original value in the transition.
+ /// Return the index of the original value in the transition.
/// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
/// c, is at index 0.
unsigned getTransitionOriginalValueIdx() const {
return 0;
}
- /// \brief Return the index of the index in the transition.
+ /// Return the index of the index in the transition.
/// E.g., for "extractelement <2 x i32> c, i32 0" the index
/// is at index 1.
unsigned getTransitionIdx() const {
return 1;
}
- /// \brief Get the type of the transition.
+ /// Get the type of the transition.
/// This is the type of the original value.
/// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
/// transition is <2 x i32>.
return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
}
- /// \brief Promote \p ToBePromoted by moving \p Def downward through.
+  /// Promote \p ToBePromoted by moving \p Def downward through it.
/// I.e., we have the following sequence:
/// Def = Transition <ty1> a to <ty2>
/// b = ToBePromoted <ty2> Def, ...
/// Def = Transition <ty1> ToBePromoted to <ty2>
void promoteImpl(Instruction *ToBePromoted);
- /// \brief Check whether or not it is profitable to promote all the
+ /// Check whether or not it is profitable to promote all the
/// instructions enqueued to be promoted.
bool isProfitableToPromote() {
Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
Arg0OVK, Arg1OVK);
}
- DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
- << ScalarCost << "\nVector: " << VectorCost << '\n');
+ LLVM_DEBUG(
+ dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
+ << ScalarCost << "\nVector: " << VectorCost << '\n');
return ScalarCost > VectorCost;
}
- /// \brief Generate a constant vector with \p Val with the same
+ /// Generate a constant vector with \p Val with the same
/// number of elements as the transition.
/// \p UseSplat defines whether or not \p Val should be replicated
/// across the whole vector.
return ConstantVector::get(ConstVec);
}
- /// \brief Check if promoting to a vector type an operand at \p OperandIdx
+ /// Check if promoting to a vector type an operand at \p OperandIdx
/// in \p Use can trigger undefined behavior.
static bool canCauseUndefinedBehavior(const Instruction *Use,
unsigned OperandIdx) {
assert(Transition && "Do not know how to promote null");
}
- /// \brief Check if we can promote \p ToBePromoted to \p Type.
+ /// Check if we can promote \p ToBePromoted to \p Type.
bool canPromote(const Instruction *ToBePromoted) const {
// We could support CastInst too.
return isa<BinaryOperator>(ToBePromoted);
}
- /// \brief Check if it is profitable to promote \p ToBePromoted
+ /// Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
bool shouldPromote(const Instruction *ToBePromoted) const {
// Promote only if all the operands can be statically expanded.
ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
}
- /// \brief Check whether or not \p Use can be combined
+ /// Check whether or not \p Use can be combined
/// with the transition.
/// I.e., is it possible to do Use(Transition) => AnotherUse?
bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
- /// \brief Record \p ToBePromoted as part of the chain to be promoted.
+ /// Record \p ToBePromoted as part of the chain to be promoted.
void enqueueForPromotion(Instruction *ToBePromoted) {
InstsToBePromoted.push_back(ToBePromoted);
}
- /// \brief Set the instruction that will be combined with the transition.
+ /// Set the instruction that will be combined with the transition.
void recordCombineInstruction(Instruction *ToBeCombined) {
assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
CombineInst = ToBeCombined;
}
- /// \brief Promote all the instructions enqueued for promotion if it is
+  /// Promote all the instructions enqueued for promotion if it
/// is profitable.
/// \return True if the promotion happened, false otherwise.
bool promote() {
// => we would need to check that we are moving it at a cheaper place and
// we do not do that for now.
BasicBlock *Parent = Inst->getParent();
- DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
+ LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
// If the transition has more than one use, assume this is not going to be
// beneficial.
while (Inst->hasOneUse()) {
Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
- DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
+ LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
if (ToBePromoted->getParent() != Parent) {
- DEBUG(dbgs() << "Instruction to promote is in a different block ("
- << ToBePromoted->getParent()->getName()
- << ") than the transition (" << Parent->getName() << ").\n");
+ LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
+ << ToBePromoted->getParent()->getName()
+ << ") than the transition (" << Parent->getName()
+ << ").\n");
return false;
}
if (VPH.canCombine(ToBePromoted)) {
- DEBUG(dbgs() << "Assume " << *Inst << '\n'
- << "will be combined with: " << *ToBePromoted << '\n');
+ LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
+ << "will be combined with: " << *ToBePromoted << '\n');
VPH.recordCombineInstruction(ToBePromoted);
bool Changed = VPH.promote();
NumStoreExtractExposed += Changed;
return Changed;
}
- DEBUG(dbgs() << "Try promoting.\n");
+ LLVM_DEBUG(dbgs() << "Try promoting.\n");
if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
return false;
- DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
+ LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
VPH.enqueueForPromotion(ToBePromoted);
Inst = ToBePromoted;
/// For the instruction sequence of store below, F and I values
/// are bundled together as an i64 value before being stored into memory.
-/// Sometimes it is more efficent to generate separate stores for F and I,
+/// Sometimes it is more efficient to generate separate stores for F and I,
/// which can remove the bitwise instructions or sink them to colder places.
///
/// (store (or (zext (bitcast F to i32) to i64),
/// The GEP operand must be a pointer, so must its result -> BitCast
Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
GEPI->getName(), GEPI);
+ NC->setDebugLoc(GEPI->getDebugLoc());
GEPI->replaceAllUsesWith(NC);
GEPI->eraseFromParent();
++NumGEPsElim;
// after it.
if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
continue;
- DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
+ LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
+ << *DVI << ' ' << *VI);
DVI->removeFromParent();
if (isa<PHINode>(VI))
DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
return MadeChange;
}
-/// \brief Scale down both weights to fit into uint32_t.
+/// Scale down both weights to fit into uint32_t.
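+/// For example (illustrative): NewTrue = 2^33 and NewFalse = 2^30 give
+/// Scale = 3, scaling the weights down to 2863311530 and 357913941; both fit
+/// in uint32_t and the 8:1 ratio is preserved.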
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
NewFalse = NewFalse / Scale;
}
-/// \brief Some targets prefer to split a conditional branch like:
+/// Some targets prefer to split a conditional branch like:
/// \code
/// %0 = icmp ne i32 %a, 0
/// %1 = icmp ne i32 %b, 0
!match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) )
continue;
- DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
+ LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
// Create a new BB.
auto TmpBB =
Br1->setCondition(Cond1);
LogicOp->eraseFromParent();
- // Depending on the conditon we have to either replace the true or the false
- // successor of the original branch instruction.
+ // Depending on the condition we have to either replace the true or the
+ // false successor of the original branch instruction.
if (Opc == Instruction::And)
Br1->setSuccessor(0, TmpBB);
else
// We have flexibility in setting Prob for BB1 and Prob for NewBB.
// The requirement is that
// TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
- // = TrueProb for orignal BB.
- // Assuming the orignal weights are A and B, one choice is to set BB1's
+ // = TrueProb for original BB.
+ // Assuming the original weights are A and B, one choice is to set BB1's
// weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
// assumes that
// TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
// We have flexibility in setting Prob for BB1 and Prob for TmpBB.
// The requirement is that
// FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
- // = FalseProb for orignal BB.
- // Assuming the orignal weights are A and B, one choice is to set BB1's
+ // = FalseProb for original BB.
+ // Assuming the original weights are A and B, one choice is to set BB1's
// weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
// assumes that
// FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
MadeChange = true;
- DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
- TmpBB->dump());
+ LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
+ TmpBB->dump());
}
return MadeChange;
}