//===- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "local"
89 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
91 //===----------------------------------------------------------------------===//
// Local constant propagation.
//===----------------------------------------------------------------------===//
95 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
96 /// constant value, convert it into an unconditional branch to the constant
97 /// destination. This is a nontrivial operation because the successors of this
98 /// basic block must have their PHI nodes updated.
99 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
100 /// conditions and indirectbr addresses this might make dead if
101 /// DeleteDeadConditions is true.
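///
/// For illustration (a sketch, not taken from this file's tests): a terminator
/// such as
///   br i1 true, label %live, label %dead
/// is rewritten to
///   br label %live
/// after %dead has its PHI entries for this block removed.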
102 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
103 const TargetLibraryInfo *TLI) {
104 TerminatorInst *T = BB->getTerminator();
105 IRBuilder<> Builder(T);
107 // Branch - See if we are conditional jumping on constant
108 if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
109 if (BI->isUnconditional()) return false; // Can't optimize uncond branch
110 BasicBlock *Dest1 = BI->getSuccessor(0);
111 BasicBlock *Dest2 = BI->getSuccessor(1);
113 if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
114 // Are we branching on constant?
115 // YES. Change to unconditional branch...
116 BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
117 BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
119 //cerr << "Function: " << T->getParent()->getParent()
120 // << "\nRemoving branch from " << T->getParent()
121 // << "\n\nTo: " << OldDest << endl;
      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
125 OldDest->removePredecessor(BB);
127 // Replace the conditional branch with an unconditional one.
128 Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
154 // If we are switching on a constant, we can convert the switch to an
155 // unconditional branch.
156 ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
157 BasicBlock *DefaultDest = SI->getDefaultDest();
158 BasicBlock *TheOnlyDest = DefaultDest;
160 // If the default is unreachable, ignore it when searching for TheOnlyDest.
161 if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
162 SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

166 // Figure out which case it goes to.
167 for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
168 // Found case matching a constant operand?
169 if (i->getCaseValue() == CI) {
        TheOnlyDest = i->getCaseSuccessor();
        break;
      }

174 // Check to see if this branch is going to the same place as the default
175 // dest. If so, eliminate it as an explicit compare.
176 if (i->getCaseSuccessor() == DefaultDest) {
177 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
178 unsigned NCases = SI->getNumCases();
179 // Fold the case metadata into the default if there will be any branches
180 // left, unless the metadata doesn't match the switch.
181 if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
182 // Collect branch weights into a vector.
183 SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge weight of this case to the default weight.
          unsigned idx = i->getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        i = SI->removeCase(i);
        e = SI->case_end();
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++i;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not any of the cases, go to the default
      // destination.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
227 BasicBlock *BB = SI->getParent();
229 // Remove entries from PHI nodes which we no longer branch to...
      for (BasicBlock *Succ : SI->successors()) {
        // Found case matching a constant operand?
        if (Succ == TheOnlyDest)
          TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }
238 // Delete the old switch.
239 Value *Cond = SI->getCondition();
240 SI->eraseFromParent();
241 if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
247 // Otherwise, we can fold this switch into a conditional branch
248 // instruction if it has only one non-default destination.
249 auto FirstCase = *SI->case_begin();
250 Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
251 FirstCase.getCaseValue(), "cond");
253 // Insert the new branch.
254 BranchInst *NewBr = Builder.CreateCondBr(Cond,
255 FirstCase.getCaseSuccessor(),
256 SI->getDefaultDest());
257 MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
258 if (MD && MD->getNumOperands() == 3) {
259 ConstantInt *SICase =
260 mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
263 assert(SICase && SIDef);
264 // The TrueWeight should be the weight for the single case of SI.
265 NewBr->setMetadata(LLVMContext::MD_prof,
266 MDBuilder(BB->getContext()).
267 createBranchWeights(SICase->getValue().getZExtValue(),
                               SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
284 // indirectbr blockaddress(@F, @BB) -> br label @BB
285 if (BlockAddress *BA =
286 dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
287 BasicBlock *TheOnlyDest = BA->getBasicBlock();
288 // Insert the new branch.
289 Builder.CreateBr(TheOnlyDest);
      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = nullptr;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }

297 Value *Address = IBI->getAddress();
298 IBI->eraseFromParent();
299 if (DeleteDeadConditions)
300 RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}
317 //===----------------------------------------------------------------------===//
// Local dead code elimination.
//===----------------------------------------------------------------------===//
321 /// isInstructionTriviallyDead - Return true if the result produced by the
322 /// instruction is not used, and the instruction has no side effects.
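///
/// A minimal usage sketch (hypothetical caller, not part of this file):
/// \code
///   if (isInstructionTriviallyDead(I, TLI))
///     I->eraseFromParent();
/// \endcode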
324 bool llvm::isInstructionTriviallyDead(Instruction *I,
325 const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}
331 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
332 const TargetLibraryInfo *TLI) {
  if (isa<TerminatorInst>(I))
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their right-hand is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));

    // Assumptions are dead if their condition is trivially true. Guards on
    // true are operationally no-ops. In the future we can consider more
    // sophisticated tradeoffs for guards considering potential for check
    // widening, but for now we keep things simple.
    if (II->getIntrinsicID() == Intrinsic::assume ||
        II->getIntrinsicID() == Intrinsic::experimental_guard) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }
  }

  if (isAllocLikeFn(I, TLI))
    return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  if (CallSite CS = CallSite(I))
    if (isMathLibCallNoop(CS, TLI))
      return true;

  return false;
}
396 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
397 /// trivially dead instruction, delete it. If that makes any of its operands
398 /// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
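///
/// A typical call site looks roughly like this (illustrative only):
/// \code
///   if (DeleteDeadConditions)
///     RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
/// \endcode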
bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                                 const TargetLibraryInfo *TLI) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}
435 /// areAllUsesEqual - Check whether the uses of a value are all the same.
436 /// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }

  return true;
}
453 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
454 /// dead PHI node, due to being a def-use chain of single-use nodes that
455 /// either forms a cycle or is terminated by a trivially dead instruction,
456 /// delete it. If that makes any of its operands trivially dead, delete them
457 /// too, recursively. Return true if a change was made.
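///
/// For example (a sketch): in a single-block loop such as
///   %p = phi i32 [ 0, %entry ], [ %x, %loop ]
///   %x = add i32 %p, 1
/// where %x has no use other than feeding %p, the phi and the add form a dead
/// def-use cycle that this routine breaks up and deletes.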
458 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
459 const TargetLibraryInfo *TLI) {
460 SmallPtrSet<Instruction*, 4> Visited;
461 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
462 I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }

  return false;
}
static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I)
        WorkList.insert(cast<Instruction>(U));
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}
530 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well as in this block.
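///
/// A minimal usage sketch (hypothetical caller, not part of this file):
/// \code
///   for (BasicBlock &BB : F)
///     Changed |= SimplifyInstructionsInBlock(&BB, TLI);
/// \endcode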
535 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
536 const TargetLibraryInfo *TLI) {
537 bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}
571 //===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
//===----------------------------------------------------------------------===//
575 /// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
576 /// method is called when we're about to delete Pred as a predecessor of BB. If
577 /// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
579 /// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
586 void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred) {
587 // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

591 // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
592 // them down. This will leave us with single entry phi nodes and other phis
593 // that can be removed.
594 BB->removePredecessor(Pred, true);
596 WeakTrackingVH PhiIt = &BB->front();
597 while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
598 PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
599 Value *OldPhiIt = PhiIt;
    if (!recursivelySimplifyInstruction(PN))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid, restart scanning from the top
    // of the BB.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}
611 /// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
612 /// predecessor is known to have one successor (DestBB!). Eliminate the edge
613 /// between them, moving the instructions in the predecessor into DestBB and
614 /// deleting the predecessor block.
615 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, DominatorTree *DT) {
616 // If BB has single-entry PHI nodes, fold them.
617 while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
618 Value *NewVal = PN->getIncomingValue(0);
619 // Replace self referencing PHI with undef, it must be dead.
620 if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
621 PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

625 BasicBlock *PredBB = DestBB->getSinglePredecessor();
626 assert(PredBB && "Block doesn't have a single predecessor!");
628 // Zap anything that took the address of DestBB. Not doing this will give the
629 // address an invalid value.
630 if (DestBB->hasAddressTaken()) {
631 BlockAddress *BA = BlockAddress::get(DestBB);
632 Constant *Replacement =
633 ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

639 // Anything that branched to PredBB now branches to DestBB.
640 PredBB->replaceAllUsesWith(DestBB);
642 // Splice all the instructions from PredBB to DestBB.
643 PredBB->getTerminator()->eraseFromParent();
644 DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
646 // If the PredBB is the entry block of the function, move DestBB up to
647 // become the entry block after we erase PredBB.
648 if (PredBB == &DestBB->getParent()->getEntryBlock())
    DestBB->moveAfter(PredBB);

  if (DT) {
    // For some irreducible CFG we end up having forward-unreachable blocks
    // so check if getNode returns a valid node before updating the domtree.
    if (DomTreeNode *DTN = DT->getNode(PredBB)) {
      BasicBlock *PredBBIDom = DTN->getIDom()->getBlock();
      DT->changeImmediateDominator(DestBB, PredBBIDom);
      DT->eraseNode(PredBB);
    }
  }

  PredBB->eraseFromParent();
}
664 /// CanMergeValues - Return true if we can choose one of these values to use
/// in place of the other. Note that we will always choose the non-undef
/// value to keep.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}
671 /// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
672 /// almost-empty BB ending in an unconditional branch to Succ, into Succ.
674 /// Assumption: Succ is the single successor for BB.
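///
/// A conflict arises, schematically (illustrative only), when BB and Succ
/// share a predecessor %P and a PHI in Succ would need two different values
/// for the %P edge after the merge, e.g.
///   %v = phi i32 [ 0, %P ], [ 1, %BB ]
/// where %P also branches to %BB; merging is still allowed if one of the
/// conflicting values is undef.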
675 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
676 assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
678 DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
679 << Succ->getName() << "\n");
  // Shortcut, if there is only a single predecessor it must be BB and merging
  // is always safe
  if (Succ->getSinglePredecessor()) return true;

684 // Make a list of the predecessors of BB
685 SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
687 // Look at all the phi nodes in Succ, to see if they present a conflict when
688 // merging these blocks
689 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
690 PHINode *PN = cast<PHINode>(I);
692 // If the incoming value from BB is again a PHINode in
693 // BB which has the same incoming value for *PI as PN does, we can
694 // merge the phi nodes and then the blocks can still be merged
695 PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
696 if (BBPN && BBPN->getParent() == BB) {
697 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
698 BasicBlock *IBB = PN->getIncomingBlock(PI);
699 if (BBPreds.count(IBB) &&
700 !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
701 PN->getIncomingValue(PI))) {
702 DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
703 << Succ->getName() << " is conflicting with "
704 << BBPN->getName() << " with regard to common predecessor "
                       << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
711 for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
712 // See if the incoming value for the common predecessor is equal to the
713 // one for BB, in which case this phi node will not prevent the merging
715 BasicBlock *IBB = PN->getIncomingBlock(PI);
716 if (BBPreds.count(IBB) &&
717 !CanMergeValues(Val, PN->getIncomingValue(PI))) {
718 DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
719 << Succ->getName() << " is conflicting with regard to common "
                     << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}
730 using PredBlockVector = SmallVector<BasicBlock *, 16>;
731 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
733 /// \brief Determines the value to use as the phi node input for a block.
735 /// Select between \p OldVal any value that we know flows from \p BB
736 /// to a particular phi on the basis of which one (if either) is not
737 /// undef. Update IncomingValues based on the selected value.
739 /// \param OldVal The value we are considering selecting.
740 /// \param BB The block that the value flows in from.
741 /// \param IncomingValues A map from block-to-value for other phi inputs
742 /// that we have examined.
744 /// \returns the selected value.
745 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
746 IncomingValueMap &IncomingValues) {
747 if (!isa<UndefValue>(OldVal)) {
748 assert((!IncomingValues.count(BB) ||
749 IncomingValues.find(BB)->second == OldVal) &&
750 "Expected OldVal to match incoming value from BB!");
    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}
762 /// \brief Create a map from block to value for the operands of a
/// Create a map from block to value for each non-undef value flowing
/// into the phi.
///
768 /// \param PN The phi we are collecting the map for.
769 /// \param IncomingValues [out] The map from block to value for this phi.
770 static void gatherIncomingValuesToPhi(PHINode *PN,
771 IncomingValueMap &IncomingValues) {
772 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
773 BasicBlock *BB = PN->getIncomingBlock(i);
774 Value *V = PN->getIncomingValue(i);
776 if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}
781 /// \brief Replace the incoming undef values to a phi with the values
782 /// from a block-to-value map.
784 /// \param PN The phi we are replacing the undefs in.
785 /// \param IncomingValues A map from block to value.
786 static void replaceUndefValuesInPhi(PHINode *PN,
787 const IncomingValueMap &IncomingValues) {
788 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
789 Value *V = PN->getIncomingValue(i);
791 if (!isa<UndefValue>(V)) continue;
793 BasicBlock *BB = PN->getIncomingBlock(i);
794 IncomingValueMap::const_iterator It = IncomingValues.find(BB);
795 if (It == IncomingValues.end()) continue;
    PN->setIncomingValue(i, It->second);
  }
}
801 /// \brief Replace a value flowing from a block to a phi with
802 /// potentially multiple instances of that value flowing from the
803 /// block's predecessors to the phi.
805 /// \param BB The block with the value flowing into the phi.
806 /// \param BBPreds The predecessors of BB.
807 /// \param PN The phi that we are updating.
808 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
812 assert(OldVal && "No entry in PHI for Pred BB!");
814 IncomingValueMap IncomingValues;
816 // We are merging two blocks - BB, and the block containing PN - and
817 // as a result we need to redirect edges from the predecessors of BB
818 // to go to the block containing PN, and update PN
819 // accordingly. Since we allow merging blocks in the case where the
820 // predecessor and successor blocks both share some predecessors,
821 // and where some of those common predecessors might have undef
822 // values flowing into PN, we want to rewrite those values to be
823 // consistent with the non-undef values.
825 gatherIncomingValuesToPhi(PN, IncomingValues);
827 // If this incoming value is one of the PHI nodes in BB, the new entries
828 // in the PHI node are the entries from the old PHI.
829 if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
830 PHINode *OldValPN = cast<PHINode>(OldVal);
831 for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
832 // Note that, since we are merging phi nodes and BB and Succ might
833 // have common predecessors, we could end up with a phi node with
834 // identical incoming branches. This will be cleaned up later (and
835 // will trigger asserts if we try to clean it up now, without also
836 // simplifying the corresponding conditional branch).
837 BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
838 Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}
863 /// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
864 /// unconditional branch, and contains no instructions other than PHI nodes,
865 /// potential side-effect free intrinsics and the branch. If possible,
866 /// eliminate BB by rewriting all the predecessors to branch to the successor
867 /// block and return true. If we can't transform, return false.
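///
/// Schematically (a sketch): a block of the form
///   bb:
///     br label %succ
/// is removed by retargeting every predecessor of %bb to %succ, after the PHI
/// conflict checks below succeed.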
868 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
869 assert(BB != &BB->getParent()->getEntryBlock() &&
870 "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
872 // We can't eliminate infinite loops.
873 BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
874 if (BB == Succ) return false;
876 // Check to see if merging these blocks would cause conflicts for any of the
877 // phi nodes in BB or Succ. If not, we can safely merge.
878 if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
880 // Check for cases where Succ has multiple predecessors and a PHI node in BB
881 // has uses which will not disappear when the PHI nodes are merged. It is
882 // possible to handle such cases, but difficult: it requires checking whether
883 // BB dominates Succ, which is non-trivial to calculate in the case where
884 // Succ has multiple predecessors. Also, it requires checking whether
885 // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
889 // Note that if this check finds a live use, BB dominates Succ, so BB is
890 // something like a loop pre-header (or rarely, a part of an irreducible CFG);
891 // folding the branch isn't profitable in that case anyway.
892 if (!Succ->getSinglePredecessor()) {
893 BasicBlock::iterator BBI = BB->begin();
894 while (isa<PHINode>(*BBI)) {
895 for (Use &U : BBI->uses()) {
896 if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

907 DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
909 if (isa<PHINode>(Succ->begin())) {
910 // If there is more than one pred of succ, and there are PHI nodes in
911 // the successor, then we need to add incoming edges for the PHI nodes
913 const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));
915 // Loop over all of the PHI nodes in the successor of BB.
916 for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
917 PHINode *PN = cast<PHINode>(I);
      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

923 if (Succ->getSinglePredecessor()) {
924 // BB is the only predecessor of Succ, so Succ will end up with exactly
925 // the same predecessors BB had.
927 // Copy over any phi, debug or lifetime instruction.
928 BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

939 // If the unconditional branch we replaced contains llvm.loop metadata, we
940 // add the metadata to the branch instructions in the predecessors.
941 unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
942 Instruction *TI = BB->getTerminator();
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
        BasicBlock *Pred = *PI;
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);
      }

950 // Everything that jumped to BB now goes to Succ.
951 BB->replaceAllUsesWith(Succ);
952 if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent(); // Delete the old basic block.

  return true;
}
957 /// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
958 /// nodes in this block. This doesn't try to be clever about PHI nodes
959 /// which differ only in the order of the incoming values, but instcombine
960 /// orders them so it usually won't matter.
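///
/// For example (illustrative only), given
///   %a = phi i32 [ 0, %bb0 ], [ 1, %bb1 ]
///   %b = phi i32 [ 0, %bb0 ], [ 1, %bb1 ]
/// all uses of %b are rewritten to use %a and %b is erased.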
961 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
962 // This implementation doesn't currently consider undef operands
963 // specially. Theoretically, two phis which are identical except for
964 // one having an undef where the other doesn't could be collapsed.
966 struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static unsigned getHashValue(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
          RHS == getEmptyKey() || RHS == getTombstoneKey())
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }
  };

992 // Set of unique PHINodes.
993 DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
996 bool Changed = false;
997 for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
998 auto Inserted = PHISet.insert(PN);
999 if (!Inserted.second) {
1000 // A duplicate. Replace this PHI with its duplicate.
1001 PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}
1015 /// enforceKnownAlignment - If the specified pointer points to an object that
1016 /// we control, modify the object's alignment to PrefAlign. This isn't
1017 /// often possible though. If alignment is important, a more reliable approach
1018 /// is to simply align all global variables and allocation instructions to
1019 /// their preferred alignment from the beginning.
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign,
                                      const DataLayout &DL) {
  assert(PrefAlign > Align);

  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: ideally, computeKnownBits ought to have used
    // AllocaInst::getAlignment() in its computation already, making
    // the below max redundant. But, as it turns out,
    // stripPointerCasts recurses through infinite layers of bitcasts,
    // while computeKnownBits is not allowed to traverse more than 6
    // levels.
    Align = std::max(AI->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align = std::max(GO->getAlignment(), Align);
    if (PrefAlign <= Align)
      return Align;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return Align;

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align;
}
1066 unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
1067 const DataLayout &DL,
1068 const Instruction *CxtI,
1069 AssumptionCache *AC,
1070 const DominatorTree *DT) {
1071 assert(V->getType()->isPointerTy() &&
1072 "getOrEnforceKnownAlignment expects a pointer!");
1074 KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1075 unsigned TrailZ = Known.countMinTrailingZeros();
1077 // Avoid trouble with ridiculously large TrailZ values, such as
1078 // those computed from a null pointer.
1079 TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
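  // Worked example (illustrative only): if computeKnownBits proves that the
  // low four bits of the pointer are zero, TrailZ is 4 and the alignment
  // computed below is 1u << 4 == 16 bytes.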
1081 unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);
1083 // LLVM doesn't support alignments larger than this currently.
1084 Align = std::min(Align, +Value::MaximumAlignment);
1086 if (PrefAlign > Align)
1087 Align = enforceKnownAlignment(V, Align, PrefAlign, DL);
  // We don't need to make any adjustment.
  return Align;
}
1093 ///===---------------------------------------------------------------------===//
/// Dbg Intrinsic utilities
///===---------------------------------------------------------------------===//

1097 /// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DILocalVariable *DIVar, DIExpression *DIExpr,
                              Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getVariable() == DIVar &&
          DVI->getExpression() == DIExpr)
        return true;
  }
  return false;
}
1115 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1116 static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
1120 // is removed by LowerDbgDeclare(), we need to make sure that we are
1121 // not inserting the same dbg.value intrinsic over and over.
1122 SmallVector<DbgValueInst *, 1> DbgValues;
1123 findDbgValues(DbgValues, APN);
1124 for (auto *DVI : DbgValues) {
1125 assert(DVI->getValue() == APN);
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  return false;
}
1132 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1133 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1134 void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1135 StoreInst *SI, DIBuilder &Builder) {
1136 assert(DII->isAddressOfVariable());
1137 auto *DIVar = DII->getVariable();
1138 assert(DIVar && "Missing variable");
1139 auto *DIExpr = DII->getExpression();
1140 Value *DV = SI->getOperand(0);
1142 // If an argument is zero extended then use argument directly. The ZExt
1143 // may be zapped by an optimization pass in future.
1144 Argument *ExtendedArg = nullptr;
1145 if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
1146 ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
1147 if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg) {
    // If this DII was already describing only a fragment of a variable, ensure
1151 // that fragment is appropriately narrowed here.
1152 // But if a fragment wasn't used, describe the value as the original
1153 // argument (rather than the zext or sext) so that it remains described even
1154 // if the sext/zext is optimized away. This widens the variable description,
1155 // leaving it up to the consumer to know how the smaller value may be
1156 // represented in a larger register.
1157 if (auto Fragment = DIExpr->getFragmentInfo()) {
1158 unsigned FragmentOffset = Fragment->OffsetInBits;
1159 SmallVector<uint64_t, 3> Ops(DIExpr->elements_begin(),
1160 DIExpr->elements_end() - 3);
1161 Ops.push_back(dwarf::DW_OP_LLVM_fragment);
1162 Ops.push_back(FragmentOffset);
1163 const DataLayout &DL = DII->getModule()->getDataLayout();
1164 Ops.push_back(DL.getTypeSizeInBits(ExtendedArg->getType()));
      DIExpr = Builder.createExpression(Ops);
    }
    DV = ExtendedArg;
  }
  if (!LdStHasDebugValue(DIVar, DIExpr, SI))
    Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, DII->getDebugLoc(),
                                    SI);
}
1174 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1175 /// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
1176 void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1177 LoadInst *LI, DIBuilder &Builder) {
1178 auto *DIVar = DII->getVariable();
1179 auto *DIExpr = DII->getExpression();
1180 assert(DIVar && "Missing variable");
  if (LdStHasDebugValue(DIVar, DIExpr, LI))
    return;

1185 // We are now tracking the loaded value instead of the address. In the
1186 // future if multi-location support is added to the IR, it might be
1187 // preferable to keep tracking both the loaded value and the original
1188 // address in case the alloca can not be elided.
1189 Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1190 LI, DIVar, DIExpr, DII->getDebugLoc(), (Instruction *)nullptr);
  DbgValue->insertAfter(LI);
}
1194 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1195 /// llvm.dbg.declare or llvm.dbg.addr intrinsic.
1196 void llvm::ConvertDebugDeclareToDebugValue(DbgInfoIntrinsic *DII,
1197 PHINode *APN, DIBuilder &Builder) {
1198 auto *DIVar = DII->getVariable();
1199 auto *DIExpr = DII->getExpression();
1200 assert(DIVar && "Missing variable");
  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

1205 BasicBlock *BB = APN->getParent();
1206 auto InsertionPt = BB->getFirstInsertionPt();
  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert dbg.value markers in the successors when appropriate.
  if (InsertionPt != BB->end())
    Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, DII->getDebugLoc(),
                                    &*InsertionPt);
}
1216 /// Determine whether this alloca is either a VLA or an array.
1217 static bool isArray(AllocaInst *AI) {
1218 return AI->isArrayAllocation() ||
         AI->getType()->getElementType()->isArrayTy();
}
1222 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1223 /// of llvm.dbg.value intrinsics.
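///
/// Schematically (a sketch): for an alloca %x described by
///   call void @llvm.dbg.declare(metadata i32* %x, ...)
/// each store of a value %v into %x gets a matching
///   call void @llvm.dbg.value(metadata i32 %v, ...)
/// and the original dbg.declare is erased.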
1224 bool llvm::LowerDbgDeclare(Function &F) {
1225 DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1226 SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (auto &FI : F)
    for (Instruction &BI : FI)
1229 if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
        Dbgs.push_back(DDI);

  if (Dbgs.empty())
    return false;

  for (auto &I : Dbgs) {
1236 DbgDeclareInst *DDI = I;
1237 AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1238 // If this is an alloca for a scalar variable, insert a dbg.value
1239 // at each load and store to the alloca and erase the dbg.declare.
1240 // The dbg.values allow tracking a variable even if it is not
1241 // stored on the stack, while the dbg.declare can only describe
1242 // the stack slot (and at a lexical-scope granularity). Later
1243 // passes will attempt to elide the stack slot.
1244 if (AI && !isArray(AI)) {
1245 for (auto &AIUse : AI->uses()) {
1246 User *U = AIUse.getUser();
1247 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1248 if (AIUse.getOperandNo() == 1)
1249 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1250 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1251 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1252 } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1253 // This is a call by-value or some other instruction that
1254 // takes a pointer to the variable. Insert a *value*
1255 // intrinsic that describes the alloca.
1256 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(),
                                      DDI->getExpression(), DDI->getDebugLoc(),
                                      CI);
        }
      }
      DDI->eraseFromParent();
    }
  }
  return true;
}
1267 /// Finds all intrinsics declaring local variables as living in the memory that
1268 /// 'V' points to. This may include a mix of dbg.declare and
1269 /// dbg.addr intrinsics.
1270 TinyPtrVector<DbgInfoIntrinsic *> llvm::FindDbgAddrUses(Value *V) {
  auto *L = LocalAsMetadata::getIfExists(V);
  if (!L)
    return {};
  auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L);
  if (!MDV)
    return {};

1278 TinyPtrVector<DbgInfoIntrinsic *> Declares;
1279 for (User *U : MDV->users()) {
1280 if (auto *DII = dyn_cast<DbgInfoIntrinsic>(U))
1281 if (DII->isAddressOfVariable())
        Declares.push_back(DII);
  }

  return Declares;
}
1288 void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) {
1289 if (auto *L = LocalAsMetadata::getIfExists(V))
1290 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1291 for (User *U : MDV->users())
1292 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
        DbgValues.push_back(DVI);
}
static void findDbgUsers(SmallVectorImpl<DbgInfoIntrinsic *> &DbgUsers,
                         Value *V) {
  if (auto *L = LocalAsMetadata::getIfExists(V))
1299 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L))
1300 for (User *U : MDV->users())
1301 if (DbgInfoIntrinsic *DII = dyn_cast<DbgInfoIntrinsic>(U))
        DbgUsers.push_back(DII);
}
1305 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1306 Instruction *InsertBefore, DIBuilder &Builder,
1307 bool DerefBefore, int Offset, bool DerefAfter) {
1308 auto DbgAddrs = FindDbgAddrUses(Address);
1309 for (DbgInfoIntrinsic *DII : DbgAddrs) {
1310 DebugLoc Loc = DII->getDebugLoc();
1311 auto *DIVar = DII->getVariable();
1312 auto *DIExpr = DII->getExpression();
1313 assert(DIVar && "Missing variable");
1314 DIExpr = DIExpression::prepend(DIExpr, DerefBefore, Offset, DerefAfter);
1315 // Insert llvm.dbg.declare immediately after InsertBefore, and remove old
1316 // llvm.dbg.declare.
1317 Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, InsertBefore);
1318 if (DII == InsertBefore)
1319 InsertBefore = &*std::next(InsertBefore->getIterator());
    DII->eraseFromParent();
  }
  return !DbgAddrs.empty();
}
1325 bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1326 DIBuilder &Builder, bool DerefBefore,
1327 int Offset, bool DerefAfter) {
1328 return replaceDbgDeclare(AI, NewAllocaAddress, AI->getNextNode(), Builder,
                           DerefBefore, Offset, DerefAfter);
}
1332 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1333 DIBuilder &Builder, int Offset) {
1334 DebugLoc Loc = DVI->getDebugLoc();
1335 auto *DIVar = DVI->getVariable();
1336 auto *DIExpr = DVI->getExpression();
1337 assert(DIVar && "Missing variable");
1339 // This is an alloca-based llvm.dbg.value. The first thing it should do with
  // the alloca pointer is dereference it. Otherwise we don't know how to handle
  // it and give up.
  if (!DIExpr || DIExpr->getNumElements() < 1 ||
      DIExpr->getElement(0) != dwarf::DW_OP_deref)
    return;

  // Insert the offset immediately after the first deref.
  // We could just change the offset argument of dbg.value, but it's unsigned...
  if (Offset) {
    SmallVector<uint64_t, 4> Ops;
    Ops.push_back(dwarf::DW_OP_deref);
    DIExpression::appendOffset(Ops, Offset);
    Ops.append(DIExpr->elements_begin() + 1, DIExpr->elements_end());
    DIExpr = Builder.createExpression(Ops);
  }

  Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
  DVI->eraseFromParent();
}
1360 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1361 DIBuilder &Builder, int Offset) {
1362 if (auto *L = LocalAsMetadata::getIfExists(AI))
1363 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1364 for (auto UI = MDV->use_begin(), UE = MDV->use_end(); UI != UE;) {
        Use &U = *UI++;
        if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
          replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
      }
}
1371 void llvm::salvageDebugInfo(Instruction &I) {
1372 SmallVector<DbgValueInst *, 1> DbgValues;
1373 auto &M = *I.getModule();
1375 auto wrapMD = [&](Value *V) {
    return MetadataAsValue::get(I.getContext(), ValueAsMetadata::get(V));
  };

  auto applyOffset = [&](DbgValueInst *DVI, uint64_t Offset) {
1380 auto *DIExpr = DVI->getExpression();
1381 DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
1382 DIExpression::NoDeref,
1383 DIExpression::WithStackValue);
1384 DVI->setOperand(0, wrapMD(I.getOperand(0)));
1385 DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
    DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
  };

1389 if (isa<BitCastInst>(&I) || isa<IntToPtrInst>(&I)) {
1390 // Bitcasts are entirely irrelevant for debug info. Rewrite dbg.value,
1391 // dbg.addr, and dbg.declare to use the cast's source.
1392 SmallVector<DbgInfoIntrinsic *, 1> DbgUsers;
1393 findDbgUsers(DbgUsers, &I);
1394 for (auto *DII : DbgUsers) {
1395 DII->setOperand(0, wrapMD(I.getOperand(0)));
      DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
    }
1398 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1399 findDbgValues(DbgValues, &I);
1400 for (auto *DVI : DbgValues) {
      unsigned BitWidth =
          M.getDataLayout().getPointerSizeInBits(GEP->getPointerAddressSpace());
1403 APInt Offset(BitWidth, 0);
1404 // Rewrite a constant GEP into a DIExpression. Since we are performing
1405 // arithmetic to compute the variable's *value* in the DIExpression, we
1406 // need to mark the expression with a DW_OP_stack_value.
1407 if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset))
1408 // GEP offsets are i32 and thus always fit into an int64_t.
        applyOffset(DVI, Offset.getSExtValue());
    }
1411 } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) {
1412 if (BI->getOpcode() == Instruction::Add)
1413 if (auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)))
1414 if (ConstInt->getBitWidth() <= 64) {
1415 APInt Offset = ConstInt->getValue();
1416 findDbgValues(DbgValues, &I);
1417 for (auto *DVI : DbgValues)
            applyOffset(DVI, Offset.getSExtValue());
        }
1420 } else if (isa<LoadInst>(&I)) {
1421 findDbgValues(DbgValues, &I);
1422 for (auto *DVI : DbgValues) {
1423 // Rewrite the load into DW_OP_deref.
1424 auto *DIExpr = DVI->getExpression();
1425 DIExpr = DIExpression::prepend(DIExpr, DIExpression::WithDeref);
1426 DVI->setOperand(0, wrapMD(I.getOperand(0)));
1427 DVI->setOperand(2, MetadataAsValue::get(I.getContext(), DIExpr));
      DEBUG(dbgs() << "SALVAGE: " << *DVI << '\n');
    }
  }
}
1433 unsigned llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
1434 unsigned NumDeadInst = 0;
1435 // Delete the instructions backwards, as it has a reduced likelihood of
1436 // having to update as many def-use and use-def chains.
1437 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
1438 while (EndInst != &BB->front()) {
1439 // Delete the next to last instruction.
1440 Instruction *Inst = &*--EndInst->getIterator();
1441 if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
1442 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
    if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
      EndInst = Inst;
      continue;
    }
    if (!isa<DbgInfoIntrinsic>(Inst))
      ++NumDeadInst;
    Inst->eraseFromParent();
  }
  return NumDeadInst;
}
1454 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap,
1455 bool PreserveLCSSA) {
1456 BasicBlock *BB = I->getParent();
1457 // Loop over all of the successors, removing BB's entry from any PHI
1459 for (BasicBlock *Successor : successors(BB))
1460 Successor->removePredecessor(BB, PreserveLCSSA);
1462 // Insert a call to llvm.trap right before this. This turns the undefined
1463 // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Value *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

1472 // All instructions after this are dead.
1473 unsigned NumInstrsRemoved = 0;
1474 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
1475 while (BBI != BBE) {
1476 if (!BBI->use_empty())
1477 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
    ++NumInstrsRemoved;
  }
  return NumInstrsRemoved;
}
1484 /// changeToCall - Convert the specified invoke into a normal call.
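///
/// Schematically (illustrative only):
///   invoke void @f() to label %normal unwind label %lpad
/// becomes
///   call void @f()
///   br label %normal
/// and %lpad loses this block as a predecessor.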
1485 static void changeToCall(InvokeInst *II) {
1486 SmallVector<Value*, 8> Args(II->arg_begin(), II->arg_end());
1487 SmallVector<OperandBundleDef, 1> OpBundles;
1488 II->getOperandBundlesAsDefs(OpBundles);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, OpBundles,
                                       "", II);
  NewCall->takeName(II);
1492 NewCall->setCallingConv(II->getCallingConv());
1493 NewCall->setAttributes(II->getAttributes());
1494 NewCall->setDebugLoc(II->getDebugLoc());
1495 II->replaceAllUsesWith(NewCall);
1497 // Follow the call by a branch to the normal destination.
1498 BranchInst::Create(II->getNormalDest(), II);
1500 // Update PHI nodes in the unwind destination
1501 II->getUnwindDest()->removePredecessor(II->getParent());
  II->eraseFromParent();
}
1505 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
1506 BasicBlock *UnwindEdge) {
1507 BasicBlock *BB = CI->getParent();
  // Convert this function call into an invoke instruction. First, split the
  // basic block.
  BasicBlock *Split =
      BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

1514 // Delete the unconditional branch inserted by splitBasicBlock
1515 BB->getInstList().pop_back();
1517 // Create the new invoke instruction.
1518 SmallVector<Value *, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
1519 SmallVector<OperandBundleDef, 1> OpBundles;
1521 CI->getOperandBundlesAsDefs(OpBundles);
1523 // Note: we're round tripping operand bundles through memory here, and that
1524 // can potentially be avoided with a cleverer API design that we do not have
1527 InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge,
1528 InvokeArgs, OpBundles, CI->getName(), BB);
1529 II->setDebugLoc(CI->getDebugLoc());
1530 II->setCallingConv(CI->getCallingConv());
1531 II->setAttributes(CI->getAttributes());
1533 // Make sure that anything using the call now uses the invoke! This also
1534 // updates the CallGraph if present, because it uses a WeakTrackingVH.
1535 CI->replaceAllUsesWith(II);
1537 // Delete the original call
  Split->getInstList().pop_front();
  return Split;
}
1542 static bool markAliveBlocks(Function &F,
1543 SmallPtrSetImpl<BasicBlock*> &Reachable) {
1544 SmallVector<BasicBlock*, 128> Worklist;
1545 BasicBlock *BB = &F.front();
1546 Worklist.push_back(BB);
1547 Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

1552 // Do a quick scan of the basic block, turning any obviously unreachable
1553 // instructions into LLVM unreachable insts. The instruction combining pass
1554 // canonicalizes unreachable insts into stores to null or undef.
1555 for (Instruction &I : *BB) {
1556 // Assumptions that are known to be false are equivalent to unreachable.
1557 // Also, if the condition is undefined, then we make the choice most
1558 // beneficial to the optimizer, and choose that to also be unreachable.
1559 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
1560 if (II->getIntrinsicID() == Intrinsic::assume) {
1561 if (match(II->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
1562 // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(II, false);
            Changed = true;
            break;
          }
        }

        if (II->getIntrinsicID() == Intrinsic::experimental_guard) {
          // A call to the guard intrinsic bails out of the current compilation
          // unit if the predicate passed to it is false. If the predicate is a
          // constant false, then we know the guard will bail out of the current
          // compile unconditionally, so all code following it is dead.
          //
          // Note: unlike in llvm.assume, it is not "obviously profitable" for
          // guards to treat `undef` as `false` since a guard on `undef` can
          // still be useful for widening.
          if (match(II->getArgOperand(0), m_Zero()))
            if (!isa<UnreachableInst>(II->getNextNode())) {
              changeToUnreachable(II->getNextNode(), /*UseLLVMTrap=*/ false);
              Changed = true;
              break;
            }
        }
      }

1588 Value *Callee = CI->getCalledValue();
1589 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
          changeToUnreachable(CI, /*UseLLVMTrap=*/false);
          Changed = true;
          break;
        }
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it. Make sure there isn't *already* one there
          // though.
          if (!isa<UnreachableInst>(CI->getNextNode())) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(CI->getNextNode(), false);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (auto *SI = dyn_cast<StoreInst>(&I)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          changeToUnreachable(SI, true);
          Changed = true;
          break;
        }
      }
    }

    TerminatorInst *Terminator = BB->getTerminator();
    if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
      // Turn invokes that call 'nounwind' functions into ordinary calls.
      Value *Callee = II->getCalledValue();
      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true);
        Changed = true;
      } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // jump to the normal destination branch.
          BranchInst::Create(II->getNormalDest(), II);
          II->getUnwindDest()->removePredecessor(II->getParent());
          II->eraseFromParent();
        } else
          changeToCall(II);
        Changed = true;
      }
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
      // Remove catchpads which cannot be reached.
      struct CatchPadDenseMapInfo {
        static CatchPadInst *getEmptyKey() {
          return DenseMapInfo<CatchPadInst *>::getEmptyKey();
        }

        static CatchPadInst *getTombstoneKey() {
          return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
        }

        static unsigned getHashValue(CatchPadInst *CatchPad) {
          return static_cast<unsigned>(hash_combine_range(
              CatchPad->value_op_begin(), CatchPad->value_op_end()));
        }

        static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
          if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
              RHS == getEmptyKey() || RHS == getTombstoneKey())
            return LHS == RHS;
          return LHS->isIdenticalTo(RHS);
        }
      };

      // Set of unique CatchPads.
      SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
                    CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
          HandlerSet;
      detail::DenseSetEmpty Empty;
      for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
                                             E = CatchSwitch->handler_end();
           I != E; ++I) {
        BasicBlock *HandlerBB = *I;
        auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
        if (!HandlerSet.insert({CatchPad, Empty}).second) {
          // Duplicate handler: drop it from the catchswitch.
          CatchSwitch->removeHandler(I);
          --I;
          --E;
          Changed = true;
        }
      }
    }

    Changed |= ConstantFoldTerminator(BB, true);
    for (BasicBlock *Successor : successors(BB))
      if (Reachable.insert(Successor).second)
        Worklist.push_back(Successor);
  } while (!Worklist.empty());
  return Changed;
}

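// Illustrative sketch (editorial addition): one rewrite performed by the loop
// above, roughly. A store through a null pointer in address space 0, e.g.
//
//   store i32 42, i32* null
//
// is replaced by a call to @llvm.trap followed by an unreachable terminator
// (changeToUnreachable is invoked with UseLLVMTrap = true), and the rest of
// the block after the store is dropped, so its former successors may become
// unreachable and get cleaned up by removeUnreachableBlocks below.
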
void llvm::removeUnwindEdge(BasicBlock *BB) {
  TerminatorInst *TI = BB->getTerminator();

  if (auto *II = dyn_cast<InvokeInst>(TI)) {
    changeToCall(II);
    return;
  }

  TerminatorInst *NewTI;
  BasicBlock *UnwindDest;

  if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
    NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
    UnwindDest = CRI->getUnwindDest();
  } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
    auto *NewCatchSwitch = CatchSwitchInst::Create(
        CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
        CatchSwitch->getName(), CatchSwitch);
    for (BasicBlock *PadBB : CatchSwitch->handlers())
      NewCatchSwitch->addHandler(PadBB);

    NewTI = NewCatchSwitch;
    UnwindDest = CatchSwitch->getUnwindDest();
  } else {
    llvm_unreachable("Could not find unwind successor");
  }

  NewTI->takeName(TI);
  NewTI->setDebugLoc(TI->getDebugLoc());
  UnwindDest->removePredecessor(BB);
  TI->replaceAllUsesWith(NewTI);
  TI->eraseFromParent();
}

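// Editorial note (not from the original source): the net effect of
// removeUnwindEdge is to make BB's terminator unwind to the caller. An invoke
// simply becomes a call plus a branch to its normal destination (via
// changeToCall above); cleanupret and catchswitch are recreated with a null
// unwind destination, and the old unwind block loses BB as a predecessor.
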
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle. Return true if a change was made, false
/// otherwise. If `LVI` is passed, this function preserves LazyValueInfo
/// after modifying the CFG.
bool llvm::removeUnreachableBlocks(Function &F, LazyValueInfo *LVI) {
  SmallPtrSet<BasicBlock*, 16> Reachable;
  bool Changed = markAliveBlocks(F, Reachable);

  // If there are no unreachable blocks in the CFG, we are done.
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumRemoved += F.size() - Reachable.size();

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references...
  for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
    if (Reachable.count(&*BB))
      continue;

    for (BasicBlock *Successor : successors(&*BB))
      if (Reachable.count(Successor))
        Successor->removePredecessor(&*BB);
    if (LVI)
      LVI->eraseBlock(&*BB);
    BB->dropAllReferences();
  }

  for (Function::iterator I = ++F.begin(); I != F.end();)
    if (!Reachable.count(&*I))
      I = F.getBasicBlockList().erase(I);
    else
      ++I;

  return true;
}

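// Illustrative usage sketch (editorial addition, assuming the declaration in
// llvm/Transforms/Utils/Local.h where LVI defaults to nullptr):
//
//   bool Changed = removeUnreachableBlocks(F);        // no analysis preserved
//   // or, when LazyValueInfo must stay consistent with the new CFG:
//   bool Changed = removeUnreachableBlocks(F, &LVI);
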
void llvm::combineMetadata(Instruction *K, const Instruction *J,
                           ArrayRef<unsigned> KnownIDs) {
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  K->dropUnknownNonDebugMetadata(KnownIDs);
  K->getAllMetadataOtherThanDebugLoc(Metadata);
  for (const auto &MD : Metadata) {
    unsigned Kind = MD.first;
    MDNode *JMD = J->getMetadata(Kind);
    MDNode *KMD = MD.second;

    switch (Kind) {
      default:
        K->setMetadata(Kind, nullptr); // Remove unknown metadata
        break;
      case LLVMContext::MD_dbg:
        llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
      case LLVMContext::MD_tbaa:
        K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
        break;
      case LLVMContext::MD_alias_scope:
        K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_mem_parallel_loop_access:
        K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
        break;
      case LLVMContext::MD_range:
        K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
        break;
      case LLVMContext::MD_fpmath:
        K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
        break;
      case LLVMContext::MD_invariant_load:
        // Only set the !invariant.load if it is present in both instructions.
        K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_nonnull:
        // Only set the !nonnull if it is present in both instructions.
        K->setMetadata(Kind, JMD);
        break;
      case LLVMContext::MD_invariant_group:
        // Preserve !invariant.group in K.
        break;
      case LLVMContext::MD_align:
        K->setMetadata(Kind,
            MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
      case LLVMContext::MD_dereferenceable:
      case LLVMContext::MD_dereferenceable_or_null:
        K->setMetadata(Kind,
            MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
        break;
    }
  }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J - even when they are different.
  // Also make sure that K is a load or store - e.g. combining a bitcast with a
  // load could produce a bitcast with invariant.group metadata, which is
  // invalid.
  // FIXME: we should try to preserve both invariant.group md if they are
  // different, but right now an instruction can only have one invariant.group.
  if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
    if (isa<LoadInst>(K) || isa<StoreInst>(K))
      K->setMetadata(LLVMContext::MD_invariant_group, JMD);
}

void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J) {
  unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,            LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias,         LLVMContext::MD_range,
      LLVMContext::MD_invariant_load,  LLVMContext::MD_nonnull,
      LLVMContext::MD_invariant_group, LLVMContext::MD_align,
      LLVMContext::MD_dereferenceable,
      LLVMContext::MD_dereferenceable_or_null};
  combineMetadata(K, J, KnownIDs);
}

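// Illustrative usage sketch (editorial addition): a CSE-style client that
// keeps instruction K and is about to erase an equivalent instruction J would
// typically do something like
//
//   combineMetadataForCSE(K, J);   // make K's metadata valid for both paths
//   J->replaceAllUsesWith(K);
//   J->eraseFromParent();
//
// so that, for example, two loads carrying different !range metadata leave K
// with the most generic (merged) range rather than one that was only valid
// for the original K.
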
template <typename RootType, typename DominatesFn>
static unsigned replaceDominatedUsesWith(Value *From, Value *To,
                                         const RootType &Root,
                                         const DominatesFn &Dominates) {
  assert(From->getType() == To->getType());

  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    Use &U = *UI++;
    if (!Dominates(Root, U))
      continue;
    U.set(To);
    DEBUG(dbgs() << "Replace dominated use of '" << From->getName() << "' as "
                 << *To << " in " << *U << "\n");
    ++Count;
  }
  return Count;
}

unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
  assert(From->getType() == To->getType());
  auto *BB = From->getParent();
  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    Use &U = *UI++;
    auto *I = cast<Instruction>(U.getUser());
    if (I->getParent() == BB)
      continue;
    U.set(To);
    ++Count;
  }
  return Count;
}

unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
                                        DominatorTree &DT,
                                        const BasicBlockEdge &Root) {
  auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
    return DT.dominates(Root, U);
  };
  return ::replaceDominatedUsesWith(From, To, Root, Dominates);
}

unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
                                        DominatorTree &DT,
                                        const BasicBlock *BB) {
  auto ProperlyDominates = [&DT](const BasicBlock *BB, const Use &U) {
    auto *I = cast<Instruction>(U.getUser())->getParent();
    return DT.properlyDominates(BB, I);
  };
  return ::replaceDominatedUsesWith(From, To, BB, ProperlyDominates);
}

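// Illustrative usage sketch (editorial addition): a pass that has proved
// "%a == %b" whenever the edge BB0 -> BB1 is taken (say, from a conditional
// branch) could rewrite just the uses that the edge dominates:
//
//   BasicBlockEdge Edge(BB0, BB1);
//   unsigned NumReplaced = replaceDominatedUsesWith(A, B, DT, Edge);
//
// Uses that are not dominated by the edge (including those in BB0 itself) are
// left untouched.
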
bool llvm::callsGCLeafFunction(ImmutableCallSite CS,
                               const TargetLibraryInfo &TLI) {
  // Check if the function is specifically marked as a gc leaf function.
  if (CS.hasFnAttr("gc-leaf-function"))
    return true;
  if (const Function *F = CS.getCalledFunction()) {
    if (F->hasFnAttribute("gc-leaf-function"))
      return true;

    if (auto IID = F->getIntrinsicID())
      // Most LLVM intrinsics do not take safepoints.
      return IID != Intrinsic::experimental_gc_statepoint &&
             IID != Intrinsic::experimental_deoptimize;
  }

  // Lib calls can be materialized by some passes, and won't be
  // marked as 'gc-leaf-function.' All available Libcalls are
  // GC-leaves.
  LibFunc LF;
  if (TLI.getLibFunc(CS, LF)) {
    return TLI.has(LF);
  }

  return false;
}

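// Editorial note (not from the original source): per the checks above, a call
// counts as a GC leaf if the call site or the callee carries the
// "gc-leaf-function" string attribute (e.g. written at a call site roughly as
// call void @foo() "gc-leaf-function"="true"), if it is an intrinsic other
// than gc.statepoint/deoptimize, or if it is a recognized, available library
// call; anything else is conservatively assumed to possibly contain a
// safepoint.
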
void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
                               LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // This only directly applies if the new type is also a pointer.
  if (NewTy->isPointerTy()) {
    NewLI.setMetadata(LLVMContext::MD_nonnull, N);
    return;
  }

  // The only other translation we can do is to integral loads with !range
  // metadata.
  if (!NewTy->isIntegerTy())
    return;

  MDBuilder MDB(NewLI.getContext());
  const Value *Ptr = OldLI.getPointerOperand();
  auto *ITy = cast<IntegerType>(NewTy);
  auto *NullInt = ConstantExpr::getPtrToInt(
      ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
  auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
  NewLI.setMetadata(LLVMContext::MD_range,
                    MDB.createRange(NonNullInt, NullInt));
}

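// Worked example (editorial addition): if a load of an i8* carrying !nonnull
// is rewritten as a load of i64, the code above attaches the equivalent range
// metadata !{i64 1, i64 0}, the wrapping half-open range [1, 0), meaning "any
// value except 0", because !nonnull itself is only meaningful on
// pointer-typed loads.
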
void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
                             MDNode *N, LoadInst &NewLI) {
  auto *NewTy = NewLI.getType();

  // Give up unless it is converted to a pointer where there is a single very
  // valuable mapping we can do reliably.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
  if (!NewTy->isPointerTy())
    return;

  unsigned BitWidth = DL.getTypeSizeInBits(NewTy);
  if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
    MDNode *NN = MDNode::get(OldLI.getContext(), None);
    NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
  }
}

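// Worked example (editorial addition): going the other direction, an i64 load
// with !range !{i64 1, i64 1024} that is rewritten as a pointer load receives
// !nonnull, since that range cannot contain 0; a range that does include 0 is
// simply dropped rather than translated.
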
namespace {

/// A potential constituent of a bitreverse or bswap expression. See
/// collectBitParts for a fuller explanation.
struct BitPart {
  BitPart(Value *P, unsigned BW) : Provider(P) {
    Provenance.resize(BW);
  }

  /// The Value that this is a bitreverse/bswap of.
  Value *Provider;

  /// The "provenance" of each bit. Provenance[A] = B means that bit A
  /// in Provider becomes bit B in the result of this expression.
  SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.

  enum { Unset = -1 };
};

} // end anonymous namespace

/// Analyze the specified subexpression and see if it is capable of providing
/// pieces of a bswap or bitreverse. The subexpression provides a potential
/// piece of a bswap or bitreverse if it can be proven that each non-zero bit in
/// the output of the expression came from a corresponding bit in some other
/// value. This function is recursive, and the end result is a mapping of
/// bitnumber to bitnumber. It is the caller's responsibility to validate that
/// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
/// that the expression deposits the low byte of %X into the high byte of the
/// result and that all other bits are zero. This expression is accepted and a
/// BitPart is returned with Provider set to %X and Provenance[24-31] set to
/// [0-7].
///
/// To avoid revisiting values, the BitPart results are memoized into the
/// provided map. To avoid unnecessary copying of BitParts, BitParts are
/// constructed in-place in the \c BPS map. Because of this \c BPS needs to
/// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitPart (a Value has been analyzed and the analysis failed), we use an
/// Optional type instead to provide the same functionality.
///
/// Because we pass around references into \c BPS, we must use a container that
/// does not invalidate internal references (std::map instead of DenseMap).
static const Optional<BitPart> &
collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
                std::map<Value *, Optional<BitPart>> &BPS) {
  auto I = BPS.find(V);
  if (I != BPS.end())
    return I->second;

  auto &Result = BPS[V] = None;
  auto BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If this is an or instruction, it may be an inner node of the bswap.
    if (I->getOpcode() == Instruction::Or) {
      auto &A = collectBitParts(I->getOperand(0), MatchBSwaps,
                                MatchBitReversals, BPS);
      auto &B = collectBitParts(I->getOperand(1), MatchBSwaps,
                                MatchBitReversals, BPS);
      if (!A || !B)
        return Result = None;

      // Try and merge the two together.
      if (!A->Provider || A->Provider != B->Provider)
        return Result = None;

      Result = BitPart(A->Provider, BitWidth);
      for (unsigned i = 0; i < A->Provenance.size(); ++i) {
        if (A->Provenance[i] != BitPart::Unset &&
            B->Provenance[i] != BitPart::Unset &&
            A->Provenance[i] != B->Provenance[i])
          return Result = None;

        if (A->Provenance[i] == BitPart::Unset)
          Result->Provenance[i] = B->Provenance[i];
        else
          Result->Provenance[i] = A->Provenance[i];
      }

      return Result;
    }

    // If this is a logical shift by a constant, recurse then shift the result.
    if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
      unsigned BitShift =
          cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
      // Ensure the shift amount is defined.
      if (BitShift > BitWidth)
        return Result = None;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result = None;
      Result = Res;

      // Perform the "shift" on BitProvenance.
      auto &P = Result->Provenance;
      if (I->getOpcode() == Instruction::Shl) {
        P.erase(std::prev(P.end(), BitShift), P.end());
        P.insert(P.begin(), BitShift, BitPart::Unset);
      } else {
        P.erase(P.begin(), std::next(P.begin(), BitShift));
        P.insert(P.end(), BitShift, BitPart::Unset);
      }

      return Result;
    }

    // If this is a logical 'and' with a mask that clears bits, recurse then
    // unset the appropriate bits.
    if (I->getOpcode() == Instruction::And &&
        isa<ConstantInt>(I->getOperand(1))) {
      APInt Bit(I->getType()->getPrimitiveSizeInBits(), 1);
      const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();

      // Check that the mask allows a multiple of 8 bits for a bswap, for an
      // early exit.
      unsigned NumMaskedBits = AndMask.countPopulation();
      if (!MatchBitReversals && NumMaskedBits % 8 != 0)
        return Result = None;

      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result = None;
      Result = Res;

      for (unsigned i = 0; i < BitWidth; ++i, Bit <<= 1)
        // If the AndMask is zero for this bit, clear the bit.
        if ((AndMask & Bit) == 0)
          Result->Provenance[i] = BitPart::Unset;
      return Result;
    }

    // If this is a zext instruction, zero extend the result.
    if (I->getOpcode() == Instruction::ZExt) {
      auto &Res = collectBitParts(I->getOperand(0), MatchBSwaps,
                                  MatchBitReversals, BPS);
      if (!Res)
        return Result = None;

      Result = BitPart(Res->Provider, BitWidth);
      auto NarrowBitWidth =
          cast<IntegerType>(cast<ZExtInst>(I)->getSrcTy())->getBitWidth();
      for (unsigned i = 0; i < NarrowBitWidth; ++i)
        Result->Provenance[i] = Res->Provenance[i];
      for (unsigned i = NarrowBitWidth; i < BitWidth; ++i)
        Result->Provenance[i] = BitPart::Unset;
      return Result;
    }
  }

  // Okay, we got to something that isn't a shift, 'or', 'and' or 'zext'. This
  // must be the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned i = 0; i < BitWidth; ++i)
    Result->Provenance[i] = i;

  return Result;
}

static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  if (From % 8 != To % 8)
    return false;
  // Convert from bit indices to byte indices and check for a byte reversal.
  From >>= 3;
  To >>= 3;
  BitWidth >>= 3;
  return From == BitWidth - To - 1;
}

static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From == BitWidth - To - 1;
}

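// Worked example (editorial addition): for a 32-bit bswap,
// bitTransformIsCorrectForBSwap(/*From=*/0, /*To=*/24, /*BitWidth=*/32) holds:
// both indices are bit 0 within their byte, and byte 0 maps to byte 3, the
// mirrored byte. For a bitreverse every bit must land on its mirror image, so
// bitTransformIsCorrectForBitReverse(0, 31, 32) holds while (0, 24, 32) does
// not.
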
bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  IntegerType *ITy = dyn_cast<IntegerType>(I->getType());
  if (!ITy || ITy->getBitWidth() > 128)
    return false; // Can't do vectors or integers > 128 bits.
  unsigned BW = ITy->getBitWidth();

  unsigned DemandedBW = BW;
  IntegerType *DemandedTy = ITy;
  if (I->hasOneUse()) {
    if (TruncInst *Trunc = dyn_cast<TruncInst>(I->user_back())) {
      DemandedTy = cast<IntegerType>(Trunc->getType());
      DemandedBW = DemandedTy->getBitWidth();
    }
  }

  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS);
  if (!Res)
    return false;
  auto &BitProvenance = Res->Provenance;

  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  bool OKForBSwap = DemandedBW % 16 == 0, OKForBitReverse = true;
  for (unsigned i = 0; i < DemandedBW; ++i) {
    OKForBSwap &=
        bitTransformIsCorrectForBSwap(BitProvenance[i], i, DemandedBW);
    OKForBitReverse &=
        bitTransformIsCorrectForBitReverse(BitProvenance[i], i, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap && MatchBSwaps)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse && MatchBitReversals)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  if (ITy != DemandedTy) {
    Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
    Value *Provider = Res->Provider;
    IntegerType *ProviderTy = cast<IntegerType>(Provider->getType());
    // We may need to truncate the provider.
    if (DemandedTy != ProviderTy) {
      auto *Trunc = CastInst::Create(Instruction::Trunc, Provider, DemandedTy,
                                     "trunc", I);
      InsertedInsts.push_back(Trunc);
      Provider = Trunc;
    }
    auto *CI = CallInst::Create(F, Provider, "rev", I);
    InsertedInsts.push_back(CI);
    auto *ExtInst = CastInst::Create(Instruction::ZExt, CI, ITy, "zext", I);
    InsertedInsts.push_back(ExtInst);
    return true;
  }

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, ITy);
  InsertedInsts.push_back(CallInst::Create(F, Res->Provider, "rev", I));
  return true;
}

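// Illustrative sketch (editorial addition): the classic i16 byte-swap idiom
//
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo
//
// is matched here: collectBitParts maps every result bit of %r back to %x in
// byte-reversed order, so a "call i16 @llvm.bswap.i16(i16 %x)" is created in
// front of %r and recorded in InsertedInsts. Replacing the uses of %r with the
// new call is left to the caller.
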
// CodeGen has special handling for some string functions that may replace
// them with target-specific intrinsics. Since that'd skip our interceptors
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
// we mark affected calls as NoBuiltin, which will disable optimization
// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  Function *F = CI->getCalledFunction();
  LibFunc Func;
  if (F && !F->hasLocalLinkage() && F->hasName() &&
      TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
      !F->doesNotAccessMemory())
    CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
}

bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  // We can't have a PHI with a metadata type.
  if (I->getOperand(OpIdx)->getType()->isMetadataTy())
    return false;

  // Early exit.
  if (!isa<Constant>(I->getOperand(OpIdx)))
    return true;

  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
    // Can't handle inline asm. Skip it.
    if (isa<InlineAsm>(ImmutableCallSite(I).getCalledValue()))
      return false;
    // Many arithmetic intrinsics have no issue taking a
    // variable, however it's hard to distinguish these from
    // specials such as @llvm.frameaddress that require a constant.
    if (isa<IntrinsicInst>(I))
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (ImmutableCallSite(I).isBundleOperand(OpIdx))
      return false;
    return true;
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !dyn_cast<AllocaInst>(I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
    gep_type_iterator It = gep_type_begin(I);
    for (auto E = std::next(It, OpIdx); It != E; ++It)