1 //===- LiveIntervalAnalysis.cpp - Live Interval Analysis ------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// \file This file implements the LiveInterval analysis pass which is used
11 /// by the Linear Scan Register allocator. This pass linearizes the
12 /// basic blocks of the function in DFS order and computes live intervals for
13 /// each virtual and physical register.
15 //===----------------------------------------------------------------------===//
#include "LiveRangeCalc.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
58 #define DEBUG_TYPE "regalloc"
60 char LiveIntervals::ID = 0;
61 char &llvm::LiveIntervalsID = LiveIntervals::ID;
62 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
63 "Live Interval Analysis", false, false)
64 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
66 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
67 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
68 "Live Interval Analysis", false, false)
71 static cl::opt<bool> EnablePrecomputePhysRegs(
72 "precompute-phys-liveness", cl::Hidden,
73 cl::desc("Eagerly compute live intervals for all physreg units."));
75 static bool EnablePrecomputePhysRegs = false;
80 cl::opt<bool> UseSegmentSetForPhysRegs(
81 "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
83 "Use segment set for the computation of the live ranges of physregs."));
85 } // end namespace llvm
87 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
89 AU.addRequired<AAResultsWrapperPass>();
90 AU.addPreserved<AAResultsWrapperPass>();
91 AU.addPreserved<LiveVariables>();
92 AU.addPreservedID(MachineLoopInfoID);
93 AU.addRequiredTransitiveID(MachineDominatorsID);
94 AU.addPreservedID(MachineDominatorsID);
95 AU.addPreserved<SlotIndexes>();
96 AU.addRequiredTransitive<SlotIndexes>();
97 MachineFunctionPass::getAnalysisUsage(AU);
100 LiveIntervals::LiveIntervals() : MachineFunctionPass(ID) {
101 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
104 LiveIntervals::~LiveIntervals() {
108 void LiveIntervals::releaseMemory() {
109 // Free the live intervals themselves.
110 for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
111 delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
112 VirtRegIntervals.clear();
113 RegMaskSlots.clear();
115 RegMaskBlocks.clear();
117 for (LiveRange *LR : RegUnitRanges)
119 RegUnitRanges.clear();
121 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
122 VNInfoAllocator.Reset();
125 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
127 MRI = &MF->getRegInfo();
128 TRI = MF->getSubtarget().getRegisterInfo();
129 TII = MF->getSubtarget().getInstrInfo();
130 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
131 Indexes = &getAnalysis<SlotIndexes>();
132 DomTree = &getAnalysis<MachineDominatorTree>();
135 LRCalc = new LiveRangeCalc();
137 // Allocate space for all virtual registers.
138 VirtRegIntervals.resize(MRI->getNumVirtRegs());
142 computeLiveInRegUnits();
144 if (EnablePrecomputePhysRegs) {
145 // For stress testing, precompute live ranges of all physical register
146 // units, including reserved registers.
147 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
154 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
155 OS << "********** INTERVALS **********\n";
157 // Dump the regunits.
158 for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit)
159 if (LiveRange *LR = RegUnitRanges[Unit])
160 OS << PrintRegUnit(Unit, TRI) << ' ' << *LR << '\n';
162 // Dump the virtregs.
163 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
164 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
165 if (hasInterval(Reg))
166 OS << getInterval(Reg) << '\n';
170 for (SlotIndex Idx : RegMaskSlots)
177 void LiveIntervals::printInstrs(raw_ostream &OS) const {
178 OS << "********** MACHINEINSTRS **********\n";
179 MF->print(OS, Indexes);
182 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
183 LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
188 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
189 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? huge_valf : 0.0F;
190 return new LiveInterval(reg, Weight);
193 /// Compute the live interval of a virtual register, based on defs and uses.
194 void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
195 assert(LRCalc && "LRCalc not initialized.");
196 assert(LI.empty() && "Should only compute empty intervals.");
197 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
198 LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
199 computeDeadValues(LI, nullptr);
202 void LiveIntervals::computeVirtRegs() {
203 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
204 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
205 if (MRI->reg_nodbg_empty(Reg))
207 createAndComputeVirtRegInterval(Reg);
211 void LiveIntervals::computeRegMasks() {
212 RegMaskBlocks.resize(MF->getNumBlockIDs());
214 // Find all instructions with regmask operands.
215 for (const MachineBasicBlock &MBB : *MF) {
216 std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
217 RMB.first = RegMaskSlots.size();
219 // Some block starts, such as EH funclets, create masks.
220 if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
221 RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
222 RegMaskBits.push_back(Mask);
225 for (const MachineInstr &MI : MBB) {
226 for (const MachineOperand &MO : MI.operands()) {
229 RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
230 RegMaskBits.push_back(MO.getRegMask());
234 // Some block ends, such as funclet returns, create masks. Put the mask on
235 // the last instruction of the block, because MBB slot index intervals are
237 if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
238 assert(!MBB.empty() && "empty return block?");
239 RegMaskSlots.push_back(
240 Indexes->getInstructionIndex(MBB.back()).getRegSlot());
241 RegMaskBits.push_back(Mask);
244 // Compute the number of register mask instructions in this block.
245 RMB.second = RegMaskSlots.size() - RMB.first;
249 //===----------------------------------------------------------------------===//
250 // Register Unit Liveness
251 //===----------------------------------------------------------------------===//
// Fixed interference typically comes from ABI boundaries: Function arguments
// and return values are passed in fixed registers, and so are exception
// pointers entering landing pads. Certain instructions require values to be
// present in specific registers. That is also represented through fixed
// register assignments.
260 /// Compute the live range of a register unit, based on the uses and defs of
261 /// aliasing registers. The range should be empty, or contain only dead
262 /// phi-defs from ABI blocks.
263 void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
264 assert(LRCalc && "LRCalc not initialized.");
265 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
267 // The physregs aliasing Unit are the roots and their super-registers.
268 // Create all values as dead defs before extending to uses. Note that roots
269 // may share super-registers. That's OK because createDeadDefs() is
270 // idempotent. It is very rare for a register unit to have multiple roots, so
271 // uniquing super-registers is probably not worthwhile.
272 bool IsReserved = true;
273 for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
274 for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
275 Super.isValid(); ++Super) {
276 unsigned Reg = *Super;
277 if (!MRI->reg_empty(Reg))
278 LRCalc->createDeadDefs(LR, Reg);
279 // A register unit is considered reserved if all its roots and all their
280 // super registers are reserved.
281 if (!MRI->isReserved(Reg))
286 // Now extend LR to reach all uses.
287 // Ignore uses of reserved registers. We only track defs of those.
289 for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
290 for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
291 Super.isValid(); ++Super) {
292 unsigned Reg = *Super;
293 if (!MRI->reg_empty(Reg))
294 LRCalc->extendToUses(LR, Reg);
299 // Flush the segment set to the segment vector.
300 if (UseSegmentSetForPhysRegs)
301 LR.flushSegmentSet();
304 /// Precompute the live ranges of any register units that are live-in to an ABI
305 /// block somewhere. Register values can appear without a corresponding def when
306 /// entering the entry block or a landing pad.
307 void LiveIntervals::computeLiveInRegUnits() {
308 RegUnitRanges.resize(TRI->getNumRegUnits());
309 DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
311 // Keep track of the live range sets allocated.
312 SmallVector<unsigned, 8> NewRanges;
314 // Check all basic blocks for live-ins.
315 for (const MachineBasicBlock &MBB : *MF) {
316 // We only care about ABI blocks: Entry + landing pads.
317 if ((&MBB != &MF->front() && !MBB.isEHPad()) || MBB.livein_empty())
320 // Create phi-defs at Begin for all live-in registers.
321 SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
322 DEBUG(dbgs() << Begin << "\tBB#" << MBB.getNumber());
323 for (const auto &LI : MBB.liveins()) {
324 for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
325 unsigned Unit = *Units;
326 LiveRange *LR = RegUnitRanges[Unit];
328 // Use segment set to speed-up initial computation of the live range.
329 LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
330 NewRanges.push_back(Unit);
332 VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
334 DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
337 DEBUG(dbgs() << '\n');
339 DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");
341 // Compute the 'normal' part of the ranges.
342 for (unsigned Unit : NewRanges)
343 computeRegUnitRange(*RegUnitRanges[Unit], Unit);
346 static void createSegmentsForValues(LiveRange &LR,
347 iterator_range<LiveInterval::vni_iterator> VNIs) {
348 for (VNInfo *VNI : VNIs) {
351 SlotIndex Def = VNI->def;
352 LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
356 using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
358 static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
359 ShrinkToUsesWorkList &WorkList,
360 const LiveRange &OldRange) {
361 // Keep track of the PHIs that are in use.
362 SmallPtrSet<VNInfo*, 8> UsedPHIs;
363 // Blocks that have already been added to WorkList as live-out.
364 SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;
366 // Extend intervals to reach all uses in WorkList.
367 while (!WorkList.empty()) {
368 SlotIndex Idx = WorkList.back().first;
369 VNInfo *VNI = WorkList.back().second;
371 const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
372 SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);
374 // Extend the live range for VNI to be live at Idx.
375 if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
376 assert(ExtVNI == VNI && "Unexpected existing value number");
378 // Is this a PHIDef we haven't seen before?
379 if (!VNI->isPHIDef() || VNI->def != BlockStart ||
380 !UsedPHIs.insert(VNI).second)
382 // The PHI is live, make sure the predecessors are live-out.
383 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
384 if (!LiveOut.insert(Pred).second)
386 SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
387 // A predecessor is not required to have a live-out value for a PHI.
388 if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
389 WorkList.push_back(std::make_pair(Stop, PVNI));
394 // VNI is live-in to MBB.
395 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
396 LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
398 // Make sure VNI is live-out from the predecessors.
399 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
400 if (!LiveOut.insert(Pred).second)
402 SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
403 assert(OldRange.getVNInfoBefore(Stop) == VNI &&
404 "Wrong value out of predecessor");
405 WorkList.push_back(std::make_pair(Stop, VNI));
410 bool LiveIntervals::shrinkToUses(LiveInterval *li,
411 SmallVectorImpl<MachineInstr*> *dead) {
412 DEBUG(dbgs() << "Shrink: " << *li << '\n');
413 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
414 && "Can only shrink virtual registers");
416 // Shrink subregister live ranges.
417 bool NeedsCleanup = false;
418 for (LiveInterval::SubRange &S : li->subranges()) {
419 shrinkToUses(S, li->reg);
424 li->removeEmptySubRanges();
426 // Find all the values used, including PHI kills.
427 ShrinkToUsesWorkList WorkList;
429 // Visit all instructions reading li->reg.
430 unsigned Reg = li->reg;
431 for (MachineInstr &UseMI : MRI->reg_instructions(Reg)) {
432 if (UseMI.isDebugValue() || !UseMI.readsVirtualRegister(Reg))
434 SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
435 LiveQueryResult LRQ = li->Query(Idx);
436 VNInfo *VNI = LRQ.valueIn();
438 // This shouldn't happen: readsVirtualRegister returns true, but there is
439 // no live value. It is likely caused by a target getting <undef> flags
441 DEBUG(dbgs() << Idx << '\t' << UseMI
442 << "Warning: Instr claims to read non-existent value in "
446 // Special case: An early-clobber tied operand reads and writes the
447 // register one slot early.
448 if (VNInfo *DefVNI = LRQ.valueDefined())
451 WorkList.push_back(std::make_pair(Idx, VNI));
454 // Create new live ranges with only minimal live segments per def.
456 createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
457 extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);
459 // Move the trimmed segments back.
460 li->segments.swap(NewLR.segments);
462 // Handle dead values.
463 bool CanSeparate = computeDeadValues(*li, dead);
464 DEBUG(dbgs() << "Shrunk: " << *li << '\n');
468 bool LiveIntervals::computeDeadValues(LiveInterval &LI,
469 SmallVectorImpl<MachineInstr*> *dead) {
470 bool MayHaveSplitComponents = false;
471 for (VNInfo *VNI : LI.valnos) {
474 SlotIndex Def = VNI->def;
475 LiveRange::iterator I = LI.FindSegmentContaining(Def);
476 assert(I != LI.end() && "Missing segment for VNI");
478 // Is the register live before? Otherwise we may have to add a read-undef
479 // flag for subregister defs.
480 unsigned VReg = LI.reg;
481 if (MRI->shouldTrackSubRegLiveness(VReg)) {
482 if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
483 MachineInstr *MI = getInstructionFromIndex(Def);
484 MI->setRegisterDefReadUndef(VReg);
488 if (I->end != Def.getDeadSlot())
490 if (VNI->isPHIDef()) {
491 // This is a dead PHI. Remove it.
494 DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
495 MayHaveSplitComponents = true;
497 // This is a dead def. Make sure the instruction knows.
498 MachineInstr *MI = getInstructionFromIndex(Def);
499 assert(MI && "No instruction defining live value");
500 MI->addRegisterDead(LI.reg, TRI);
501 if (dead && MI->allDefsAreDead()) {
502 DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
507 return MayHaveSplitComponents;
510 void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
511 DEBUG(dbgs() << "Shrink: " << SR << '\n');
512 assert(TargetRegisterInfo::isVirtualRegister(Reg)
513 && "Can only shrink virtual registers");
514 // Find all the values used, including PHI kills.
515 ShrinkToUsesWorkList WorkList;
517 // Visit all instructions reading Reg.
519 for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
520 // Skip "undef" uses.
523 // Maybe the operand is for a subregister we don't care about.
524 unsigned SubReg = MO.getSubReg();
526 LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
527 if ((LaneMask & SR.LaneMask).none())
530 // We only need to visit each instruction once.
531 MachineInstr *UseMI = MO.getParent();
532 SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
537 LiveQueryResult LRQ = SR.Query(Idx);
538 VNInfo *VNI = LRQ.valueIn();
539 // For Subranges it is possible that only undef values are left in that
540 // part of the subregister, so there is no real liverange at the use
544 // Special case: An early-clobber tied operand reads and writes the
545 // register one slot early.
546 if (VNInfo *DefVNI = LRQ.valueDefined())
549 WorkList.push_back(std::make_pair(Idx, VNI));
552 // Create a new live ranges with only minimal live segments per def.
554 createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
555 extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);
557 // Move the trimmed ranges back.
558 SR.segments.swap(NewLR.segments);
560 // Remove dead PHI value numbers
561 for (VNInfo *VNI : SR.valnos) {
564 const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
565 assert(Segment != nullptr && "Missing segment for VNI");
566 if (Segment->end != VNI->def.getDeadSlot())
568 if (VNI->isPHIDef()) {
569 // This is a dead PHI. Remove it.
570 DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
572 SR.removeSegment(*Segment);
576 DEBUG(dbgs() << "Shrunk: " << SR << '\n');
579 void LiveIntervals::extendToIndices(LiveRange &LR,
580 ArrayRef<SlotIndex> Indices,
581 ArrayRef<SlotIndex> Undefs) {
582 assert(LRCalc && "LRCalc not initialized.");
583 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
584 for (SlotIndex Idx : Indices)
585 LRCalc->extend(LR, Idx, /*PhysReg=*/0, Undefs);
588 void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
589 SmallVectorImpl<SlotIndex> *EndPoints) {
590 LiveQueryResult LRQ = LR.Query(Kill);
591 VNInfo *VNI = LRQ.valueOutOrDead();
595 MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
596 SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);
598 // If VNI isn't live out from KillMBB, the value is trivially pruned.
599 if (LRQ.endPoint() < MBBEnd) {
600 LR.removeSegment(Kill, LRQ.endPoint());
601 if (EndPoints) EndPoints->push_back(LRQ.endPoint());
605 // VNI is live out of KillMBB.
606 LR.removeSegment(Kill, MBBEnd);
607 if (EndPoints) EndPoints->push_back(MBBEnd);
609 // Find all blocks that are reachable from KillMBB without leaving VNI's live
610 // range. It is possible that KillMBB itself is reachable, so start a DFS
611 // from each successor.
612 using VisitedTy = df_iterator_default_set<MachineBasicBlock*,9>;
614 for (MachineBasicBlock *Succ : KillMBB->successors()) {
615 for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
616 I = df_ext_begin(Succ, Visited), E = df_ext_end(Succ, Visited);
618 MachineBasicBlock *MBB = *I;
620 // Check if VNI is live in to MBB.
621 SlotIndex MBBStart, MBBEnd;
622 std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
623 LiveQueryResult LRQ = LR.Query(MBBStart);
624 if (LRQ.valueIn() != VNI) {
625 // This block isn't part of the VNI segment. Prune the search.
630 // Prune the search if VNI is killed in MBB.
631 if (LRQ.endPoint() < MBBEnd) {
632 LR.removeSegment(MBBStart, LRQ.endPoint());
633 if (EndPoints) EndPoints->push_back(LRQ.endPoint());
638 // VNI is live through MBB.
639 LR.removeSegment(MBBStart, MBBEnd);
640 if (EndPoints) EndPoints->push_back(MBBEnd);
//===----------------------------------------------------------------------===//
//                           Register allocator hooks.
//===----------------------------------------------------------------------===//
650 void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
651 // Keep track of regunit ranges.
652 SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
653 // Keep track of subregister ranges.
654 SmallVector<std::pair<const LiveInterval::SubRange*,
655 LiveRange::const_iterator>, 4> SRs;
657 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
658 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
659 if (MRI->reg_nodbg_empty(Reg))
661 const LiveInterval &LI = getInterval(Reg);
665 // Find the regunit intervals for the assigned register. They may overlap
666 // the virtual register live range, cancelling any kills.
668 for (MCRegUnitIterator Unit(VRM->getPhys(Reg), TRI); Unit.isValid();
670 const LiveRange &RURange = getRegUnit(*Unit);
673 RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
676 if (MRI->subRegLivenessEnabled()) {
678 for (const LiveInterval::SubRange &SR : LI.subranges()) {
679 SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
683 // Every instruction that kills Reg corresponds to a segment range end
685 for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
687 // A block index indicates an MBB edge.
688 if (RI->end.isBlock())
690 MachineInstr *MI = getInstructionFromIndex(RI->end);
694 // Check if any of the regunits are live beyond the end of RI. That could
695 // happen when a physreg is defined as a copy of a virtreg:
697 // %EAX = COPY %vreg5
698 // FOO %vreg5 <--- MI, cancel kill because %EAX is live.
701 // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
702 for (auto &RUP : RU) {
703 const LiveRange &RURange = *RUP.first;
704 LiveRange::const_iterator &I = RUP.second;
705 if (I == RURange.end())
707 I = RURange.advanceTo(I, RI->end);
708 if (I == RURange.end() || I->start >= RI->end)
710 // I is overlapping RI.
714 if (MRI->subRegLivenessEnabled()) {
715 // When reading a partial undefined value we must not add a kill flag.
716 // The regalloc might have used the undef lane for something else.
718 // %vreg1 = ... ; R32: %vreg1
719 // %vreg2:high16 = ... ; R64: %vreg2
720 // = read %vreg2<kill> ; R64: %vreg2
721 // = read %vreg1 ; R32: %vreg1
722 // The <kill> flag is correct for %vreg2, but the register allocator may
723 // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
724 // are actually never written by %vreg2. After assignment the <kill>
725 // flag at the read instruction is invalid.
726 LaneBitmask DefinedLanesMask;
728 // Compute a mask of lanes that are defined.
729 DefinedLanesMask = LaneBitmask::getNone();
730 for (auto &SRP : SRs) {
731 const LiveInterval::SubRange &SR = *SRP.first;
732 LiveRange::const_iterator &I = SRP.second;
735 I = SR.advanceTo(I, RI->end);
736 if (I == SR.end() || I->start >= RI->end)
738 // I is overlapping RI
739 DefinedLanesMask |= SR.LaneMask;
742 DefinedLanesMask = LaneBitmask::getAll();
744 bool IsFullWrite = false;
745 for (const MachineOperand &MO : MI->operands()) {
746 if (!MO.isReg() || MO.getReg() != Reg)
749 // Reading any undefined lanes?
750 LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
751 if ((UseMask & ~DefinedLanesMask).any())
753 } else if (MO.getSubReg() == 0) {
754 // Writing to the full register?
760 // If an instruction writes to a subregister, a new segment starts in
761 // the LiveInterval. But as this is only overriding part of the register
762 // adding kill-flags is not correct here after registers have been
765 // Next segment has to be adjacent in the subregister write case.
766 LiveRange::const_iterator N = std::next(RI);
767 if (N != LI.end() && N->start == RI->end)
772 MI->addRegisterKilled(Reg, nullptr);
775 MI->clearRegisterKills(Reg, nullptr);
781 LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
782 // A local live range must be fully contained inside the block, meaning it is
783 // defined and killed at instructions, not at block boundaries. It is not
784 // live in or or out of any block.
786 // It is technically possible to have a PHI-defined live range identical to a
787 // single block, but we are going to return false in that case.
789 SlotIndex Start = LI.beginIndex();
793 SlotIndex Stop = LI.endIndex();
797 // getMBBFromIndex doesn't need to search the MBB table when both indexes
798 // belong to proper instructions.
799 MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
800 MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
801 return MBB1 == MBB2 ? MBB1 : nullptr;
805 LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
806 for (const VNInfo *PHI : LI.valnos) {
807 if (PHI->isUnused() || !PHI->isPHIDef())
809 const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
810 // Conservatively return true instead of scanning huge predecessor lists.
811 if (PHIMBB->pred_size() > 100)
813 for (const MachineBasicBlock *Pred : PHIMBB->predecessors())
814 if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(Pred)))
820 float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
821 const MachineBlockFrequencyInfo *MBFI,
822 const MachineInstr &MI) {
823 BlockFrequency Freq = MBFI->getBlockFreq(MI.getParent());
824 const float Scale = 1.0f / MBFI->getEntryFreq();
825 return (isDef + isUse) * (Freq.getFrequency() * Scale);
829 LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
830 LiveInterval& Interval = createEmptyInterval(reg);
831 VNInfo *VN = Interval.getNextValue(
832 SlotIndex(getInstructionIndex(startInst).getRegSlot()),
833 getVNInfoAllocator());
834 LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
835 getMBBEndIdx(startInst.getParent()), VN);
836 Interval.addSegment(S);
841 //===----------------------------------------------------------------------===//
842 // Register mask functions
843 //===----------------------------------------------------------------------===//
845 bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
846 BitVector &UsableRegs) {
849 LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();
851 // Use a smaller arrays for local live ranges.
852 ArrayRef<SlotIndex> Slots;
853 ArrayRef<const uint32_t*> Bits;
854 if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
855 Slots = getRegMaskSlotsInBlock(MBB->getNumber());
856 Bits = getRegMaskBitsInBlock(MBB->getNumber());
858 Slots = getRegMaskSlots();
859 Bits = getRegMaskBits();
862 // We are going to enumerate all the register mask slots contained in LI.
863 // Start with a binary search of RegMaskSlots to find a starting point.
864 ArrayRef<SlotIndex>::iterator SlotI =
865 std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
866 ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
868 // No slots in range, LI begins after the last call.
874 assert(*SlotI >= LiveI->start);
875 // Loop over all slots overlapping this segment.
876 while (*SlotI < LiveI->end) {
877 // *SlotI overlaps LI. Collect mask bits.
879 // This is the first overlap. Initialize UsableRegs to all ones.
881 UsableRegs.resize(TRI->getNumRegs(), true);
884 // Remove usable registers clobbered by this mask.
885 UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
886 if (++SlotI == SlotE)
889 // *SlotI is beyond the current LI segment.
890 LiveI = LI.advanceTo(LiveI, *SlotI);
893 // Advance SlotI until it overlaps.
894 while (*SlotI < LiveI->start)
895 if (++SlotI == SlotE)
900 //===----------------------------------------------------------------------===//
901 // IntervalUpdate class.
902 //===----------------------------------------------------------------------===//
904 /// Toolkit used by handleMove to trim or extend live intervals.
905 class LiveIntervals::HMEditor {
908 const MachineRegisterInfo& MRI;
909 const TargetRegisterInfo& TRI;
912 SmallPtrSet<LiveRange*, 8> Updated;
  /// Build an editor that will move liveness at OldIdx to NewIdx across all
  /// affected ranges. When UpdateFlags is set, regunit ranges are created on
  /// demand so kill flags can be kept accurate (see getRegUnitLI).
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
      : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
        UpdateFlags(UpdateFlags) {}
922 // FIXME: UpdateFlags is a workaround that creates live intervals for all
923 // physregs, even those that aren't needed for regalloc, in order to update
924 // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
925 // flags, and postRA passes will use a live register utility instead.
926 LiveRange *getRegUnitLI(unsigned Unit) {
928 return &LIS.getRegUnit(Unit);
929 return LIS.getCachedRegUnit(Unit);
932 /// Update all live ranges touched by MI, assuming a move from OldIdx to
934 void updateAllRanges(MachineInstr *MI) {
935 DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
936 bool hasRegMask = false;
937 for (MachineOperand &MO : MI->operands()) {
945 // Aggressively clear all kill flags.
946 // They are reinserted by VirtRegRewriter.
950 unsigned Reg = MO.getReg();
953 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
954 LiveInterval &LI = LIS.getInterval(Reg);
955 if (LI.hasSubRanges()) {
956 unsigned SubReg = MO.getSubReg();
957 LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
958 : MRI.getMaxLaneMaskForVReg(Reg);
959 for (LiveInterval::SubRange &S : LI.subranges()) {
960 if ((S.LaneMask & LaneMask).none())
962 updateRange(S, Reg, S.LaneMask);
965 updateRange(LI, Reg, LaneBitmask::getNone());
969 // For physregs, only update the regunits that actually have a
970 // precomputed live range.
971 for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
972 if (LiveRange *LR = getRegUnitLI(*Units))
973 updateRange(*LR, *Units, LaneBitmask::getNone());
976 updateRegMaskSlots();
980 /// Update a single live range, assuming an instruction has been moved from
981 /// OldIdx to NewIdx.
982 void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
983 if (!Updated.insert(&LR).second)
987 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
988 dbgs() << PrintReg(Reg);
990 dbgs() << " L" << PrintLaneMask(LaneMask);
992 dbgs() << PrintRegUnit(Reg, &TRI);
994 dbgs() << ":\t" << LR << '\n';
996 if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
999 handleMoveUp(LR, Reg, LaneMask);
1000 DEBUG(dbgs() << " -->\t" << LR << '\n');
1004 /// Update LR to reflect an instruction has been moved downwards from OldIdx
1005 /// to NewIdx (OldIdx < NewIdx).
// Works directly on LR's segment vector with iterators; several of the
// std::copy calls below deliberately leave one segment "undefined" so it can
// be reused for a new segment without reallocating.
1006 void handleMoveDown(LiveRange &LR) {
1007 LiveRange::iterator E = LR.end();
1008 // Segment going into OldIdx.
1009 LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
1011 // No value live before or after OldIdx? Nothing to do.
1012 if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
1015 LiveRange::iterator OldIdxOut;
1016 // Do we have a value live-in to OldIdx?
1017 if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
1018 // If the live-in value already extends to NewIdx, there is nothing to do.
1019 if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
1021 // Aggressively remove all kill flags from the old kill point.
1022 // Kill flags shouldn't be used while live intervals exist, they will be
1023 // reinserted by VirtRegRewriter.
1024 if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
1025 for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
1026 if (MO->isReg() && MO->isUse())
1027 MO->setIsKill(false);
1029 // Is there a def before NewIdx which is not OldIdx?
1030 LiveRange::iterator Next = std::next(OldIdxIn);
1031 if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
1032 SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
1033 // If we are here then OldIdx was just a use but not a def. We only have
1034 // to ensure liveness extends to NewIdx.
1035 LiveRange::iterator NewIdxIn =
1036 LR.advanceTo(Next, NewIdx.getBaseIndex());
1037 // Extend the segment before NewIdx if necessary.
1038 if (NewIdxIn == E ||
1039 !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
1040 LiveRange::iterator Prev = std::prev(NewIdxIn);
1041 Prev->end = NewIdx.getRegSlot();
// The old use's segment now ends exactly where the following def begins.
1044 OldIdxIn->end = Next->start;
1048 // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
1049 // invalid by overlapping ranges.
1050 bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
1051 OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
1052 // If this was not a kill, then there was no def and we're done.
1056 // Did we have a Def at OldIdx?
1058 if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
// No live-in value: the segment found at OldIdx must itself start there.
1061 OldIdxOut = OldIdxIn;
1064 // If we are here then there is a Definition at OldIdx. OldIdxOut points
1065 // to the segment starting there.
1066 assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
1068 VNInfo *OldIdxVNI = OldIdxOut->valno;
1069 assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
1071 // If the defined value extends beyond NewIdx, just move the beginning
1072 // of the segment to NewIdx.
1073 SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
1074 if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
1075 OldIdxVNI->def = NewIdxDef;
1076 OldIdxOut->start = OldIdxVNI->def;
1080 // If we are here then we have a Definition at OldIdx which ends before
1083 // Is there an existing Def at NewIdx?
1084 LiveRange::iterator AfterNewIdx
1085 = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
1086 bool OldIdxDefIsDead = OldIdxOut->end.isDead();
1087 if (!OldIdxDefIsDead &&
1088 SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
1089 // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
1091 if (OldIdxOut != LR.begin() &&
1092 !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
1093 OldIdxOut->start)) {
1094 // There is no gap between OldIdxOut and its predecessor anymore,
1096 LiveRange::iterator IPrev = std::prev(OldIdxOut);
1098 IPrev->end = OldIdxOut->end;
1100 // The value is live in to OldIdx
1101 LiveRange::iterator INext = std::next(OldIdxOut);
1102 assert(INext != E && "Must have following segment");
1103 // We merge OldIdxOut and its successor. As we're dealing with subreg
1104 // reordering, there is always a successor to OldIdxOut in the same BB
1105 // We don't need INext->valno anymore and will reuse for the new segment
1108 INext->start = OldIdxOut->end;
1109 INext->valno->def = INext->start;
1111 // If NewIdx is behind the last segment, extend that and append a new one.
1112 if (AfterNewIdx == E) {
1113 // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
1115 // |- ?/OldIdxOut -| |- X0 -| ... |- Xn -| end
1116 // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS -| end
1117 std::copy(std::next(OldIdxOut), E, OldIdxOut);
1118 // The last segment is undefined now, reuse it for a dead def.
1119 LiveRange::iterator NewSegment = std::prev(E);
1120 *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
1122 DefVNI->def = NewIdxDef;
// Terminate the preceding segment at the new def so segments stay disjoint.
1124 LiveRange::iterator Prev = std::prev(NewSegment);
1125 Prev->end = NewIdxDef;
1127 // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
1129 // |- ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
1130 // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
1131 std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
1132 LiveRange::iterator Prev = std::prev(AfterNewIdx);
1133 // We have two cases:
1134 if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
1135 // Case 1: NewIdx is inside a liverange. Split this liverange at
1136 // NewIdxDef into the segment "Prev" followed by "NewSegment".
1137 LiveRange::iterator NewSegment = AfterNewIdx;
1138 *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
1139 Prev->valno->def = NewIdxDef;
1141 *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
1142 DefVNI->def = Prev->start;
1144 // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
1145 // turn Prev into a segment from NewIdx to AfterNewIdx->start.
1146 *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
1147 DefVNI->def = NewIdxDef;
1148 assert(DefVNI != AfterNewIdx->valno);
// Remaining cases: OldIdx's def was dead, or it reaches NewIdx directly.
1154 if (AfterNewIdx != E &&
1155 SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
1156 // There is an existing def at NewIdx. The def at OldIdx is coalesced into
1158 assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
1159 LR.removeValNo(OldIdxVNI);
1161 // There was no existing def at NewIdx. We need to create a dead def
1162 // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
1163 // a new segment at the place where we want to construct the dead def.
1164 // |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
1165 // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
1166 assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
1167 std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
1168 // We can reuse OldIdxVNI now.
1169 LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
1170 VNInfo *NewSegmentVNI = OldIdxVNI;
1171 NewSegmentVNI->def = NewIdxDef;
1172 *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
1177 /// Update LR to reflect an instruction has been moved upwards from OldIdx
1178 /// to NewIdx (NewIdx < OldIdx).
// Reg/LaneMask are needed here (unlike handleMoveDown) because shrinking a
// live-in segment requires scanning for the last remaining use via
// findLastUseBefore.
1179 void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
1180 LiveRange::iterator E = LR.end();
1181 // Segment going into OldIdx.
1182 LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
1184 // No value live before or after OldIdx? Nothing to do.
1185 if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
1188 LiveRange::iterator OldIdxOut;
1189 // Do we have a value live-in to OldIdx?
1190 if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
1191 // If the live-in value isn't killed here, then we have no Def at
1192 // OldIdx, moreover the value must be live at NewIdx so there is nothing
1194 bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
1198 // At this point we have to move OldIdxIn->end back to the nearest
1199 // previous use or (dead-)def but no further than NewIdx.
1200 SlotIndex DefBeforeOldIdx
1201 = std::max(OldIdxIn->start.getDeadSlot(),
1202 NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
1203 OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);
1205 // Did we have a Def at OldIdx? If not we are done now.
1206 OldIdxOut = std::next(OldIdxIn);
1207 if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
// No live-in value: the found segment itself starts at OldIdx, and its
// predecessor (if any) becomes the new OldIdxIn.
1210 OldIdxOut = OldIdxIn;
1211 OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
1214 // If we are here then there is a Definition at OldIdx. OldIdxOut points
1215 // to the segment starting there.
1216 assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
1218 VNInfo *OldIdxVNI = OldIdxOut->valno;
1219 assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
1220 bool OldIdxDefIsDead = OldIdxOut->end.isDead();
1222 // Is there an existing def at NewIdx?
1223 SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
1224 LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
1225 if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
1226 assert(NewIdxOut->valno != OldIdxVNI &&
1227 "Same value defined more than once?");
1228 // If OldIdx was a dead def remove it.
1229 if (!OldIdxDefIsDead) {
1230 // Remove segment starting at NewIdx and move begin of OldIdxOut to
1231 // NewIdx so it can take its place.
1232 OldIdxVNI->def = NewIdxDef;
1233 OldIdxOut->start = NewIdxDef;
1234 LR.removeValNo(NewIdxOut->valno);
1236 // Simply remove the dead def at OldIdx.
1237 LR.removeValNo(OldIdxVNI);
1240 // Previously nothing was live after NewIdx, so all we have to do now is
1241 // move the begin of OldIdxOut to NewIdx.
1242 if (!OldIdxDefIsDead) {
1243 // Do we have any intermediate Defs between OldIdx and NewIdx?
1244 if (OldIdxIn != E &&
1245 SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
1246 // OldIdx is not a dead def and NewIdx is before predecessor start.
1247 LiveRange::iterator NewIdxIn = NewIdxOut;
1248 assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
1249 const SlotIndex SplitPos = NewIdxDef;
1250 OldIdxVNI = OldIdxIn->valno;
1252 // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
1253 OldIdxOut->valno->def = OldIdxIn->start;
1254 *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
1256 // OldIdxIn and OldIdxVNI are now undef and can be overridden.
1257 // We Slide [NewIdxIn, OldIdxIn) down one position.
1258 // |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
1259 // => |- undef/NexIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
1260 std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
1261 // NewIdxIn is now considered undef so we can reuse it for the moved
1263 LiveRange::iterator NewSegment = NewIdxIn;
1264 LiveRange::iterator Next = std::next(NewSegment);
1265 if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
1266 // There is no gap between NewSegment and its predecessor.
1267 *NewSegment = LiveRange::Segment(Next->start, SplitPos,
1269 *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
1270 Next->valno->def = SplitPos;
1272 // There is a gap between NewSegment and its predecessor
1273 // Value becomes live in.
1274 *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
1275 NewSegment->valno->def = SplitPos;
1278 // Leave the end point of a live def.
1279 OldIdxOut->start = NewIdxDef;
1280 OldIdxVNI->def = NewIdxDef;
1281 if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
1282 OldIdxIn->end = NewIdx.getRegSlot();
1285 // OldIdxVNI is a dead def. It may have been moved across other values
1286 // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
1287 // down one position.
1288 // |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
1289 // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
1290 std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut))
1291 // OldIdxVNI can be reused now to build a new dead def segment.
1292 LiveRange::iterator NewSegment = NewIdxOut;
1293 VNInfo *NewSegmentVNI = OldIdxVNI;
1294 *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
1296 NewSegmentVNI->def = NewIdxDef;
// Relocate the regmask slot recorded for the moved instruction from OldIdx
// to NewIdx, keeping LIS.RegMaskSlots sorted. The asserts verify the move
// did not cross another regmask-carrying (call) instruction, which would
// invalidate the parallel RegMaskBits ordering.
1301 void updateRegMaskSlots() {
1302 SmallVectorImpl<SlotIndex>::iterator RI =
1303 std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
// lower_bound locates the first slot >= OldIdx; it must be exactly OldIdx.
1305 assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
1306 "No RegMask at OldIdx.");
1307 *RI = NewIdx.getRegSlot();
1308 assert((RI == LIS.RegMaskSlots.begin() ||
1309 SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
1310 "Cannot move regmask instruction above another call");
1311 assert((std::next(RI) == LIS.RegMaskSlots.end() ||
1312 SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
1313 "Cannot move regmask instruction below another call");
1316 // Return the last use of reg between NewIdx and OldIdx.
// Returns Before itself when no such use exists. Two strategies are used:
// virtual registers have a use list on MRI that can be scanned cheaply;
// register units do not, so the block is walked backwards from OldIdx.
1317 SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
1318 LaneBitmask LaneMask) {
1319 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
1320 SlotIndex LastUse = Before;
1321 for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
// Skip subregister operands whose lanes don't intersect the queried mask.
1324 unsigned SubReg = MO.getSubReg();
1325 if (SubReg != 0 && LaneMask.any()
1326 && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
1329 const MachineInstr &MI = *MO.getParent();
1330 SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
// Keep the latest use strictly before the instruction's old position.
1331 if (InstSlot > LastUse && InstSlot < OldIdx)
1332 LastUse = InstSlot.getRegSlot();
1337 // This is a regunit interval, so scanning the use list could be very
1338 // expensive. Scan upwards from OldIdx instead.
1339 assert(Before < OldIdx && "Expected upwards move");
1340 SlotIndexes *Indexes = LIS.getSlotIndexes();
1341 MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);
1343 // OldIdx may not correspond to an instruction any longer, so set MII to
1344 // point to the next instruction after OldIdx, or MBB->end().
1345 MachineBasicBlock::iterator MII = MBB->end();
1346 if (MachineInstr *MI = Indexes->getInstructionFromIndex(
1347 Indexes->getNextNonNullIndex(OldIdx)))
1348 if (MI->getParent() == MBB)
// Walk backwards over the block, ignoring DBG_VALUEs which have no slots.
1351 MachineBasicBlock::iterator Begin = MBB->begin();
1352 while (MII != Begin) {
1353 if ((--MII)->isDebugValue())
1355 SlotIndex Idx = Indexes->getInstructionIndex(*MII);
1357 // Stop searching when Before is reached.
1358 if (!SlotIndex::isEarlierInstr(Before, Idx))
1361 // Check if MII uses Reg.
1362 for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
1363 if (MO->isReg() && !MO->isUndef() &&
1364 TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
1365 TRI.hasRegUnit(MO->getReg(), Reg))
1366 return Idx.getRegSlot();
1368 // Didn't reach Before. It must be the first instruction in the block.
// Update all live ranges after MI has been moved to a new position within its
// basic block. The instruction is re-slotted in SlotIndexes first, then an
// HMEditor performs the per-range updates. UpdateFlags controls whether
// dead/kill operand flags are rewritten by the editor.
1373 void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
1374 assert(!MI.isBundled() && "Can't handle bundled instructions yet.");
1375 SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
// Remove and re-insert so the instruction gets a fresh index at its new spot.
1376 Indexes->removeMachineInstrFromMaps(MI);
1377 SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
1378 assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
1379 OldIndex < getMBBEndIdx(MI.getParent()) &&
1380 "Cannot handle moves across basic block boundaries.");
1382 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1383 HME.updateAllRanges(&MI);
// Update live ranges after MI has been folded into the bundle that starts at
// BundleStart. Unlike handleMove, no indexes are removed or inserted: MI's
// "new" position is simply BundleStart's existing slot.
1386 void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
1387 MachineInstr &BundleStart,
1389 SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1390 SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
1391 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1392 HME.updateAllRanges(&MI);
// Repair LR (a main range or subrange of Reg) over the instruction region
// [Begin, End), walking the region backwards and re-deriving segment starts
// and ends from the operands found. LaneMask restricts which subregister
// operands are considered when repairing a subrange.
1395 void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
1396 const MachineBasicBlock::iterator End,
1397 const SlotIndex endIdx,
1398 LiveRange &LR, const unsigned Reg,
1399 LaneBitmask LaneMask) {
1400 LiveInterval::iterator LII = LR.find(endIdx);
1401 SlotIndex lastUseIdx;
1402 if (LII == LR.begin()) {
1403 // This happens when the function is called for a subregister that only
1404 // occurs _after_ the range that is to be repaired.
1407 if (LII != LR.end() && LII->start < endIdx)
1408 lastUseIdx = LII->end;
// Iterate the region from End back towards Begin.
1412 for (MachineBasicBlock::iterator I = End; I != Begin;) {
1414 MachineInstr &MI = *I;
1415 if (MI.isDebugValue())
1418 SlotIndex instrIdx = getInstructionIndex(MI);
// A segment endpoint is "valid" if an instruction still exists at its index;
// invalid endpoints belong to instructions erased during the rewrite.
1419 bool isStartValid = getInstructionFromIndex(LII->start);
1420 bool isEndValid = getInstructionFromIndex(LII->end);
1422 // FIXME: This doesn't currently handle early-clobber or multiple removed
1423 // defs inside of the region to repair.
1424 for (MachineInstr::mop_iterator OI = MI.operands_begin(),
1425 OE = MI.operands_end();
1427 const MachineOperand &MO = *OI;
1428 if (!MO.isReg() || MO.getReg() != Reg)
// Ignore operands whose lanes are disjoint from the subrange being repaired.
1431 unsigned SubReg = MO.getSubReg();
1432 LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
1433 if ((Mask & LaneMask).none())
// Def handling: either adjust the existing segment or synthesize a new one.
1437 if (!isStartValid) {
1438 if (LII->end.isDead()) {
1439 SlotIndex prevStart;
1440 if (LII != LR.begin())
1441 prevStart = std::prev(LII)->start;
1443 // FIXME: This could be more efficient if there was a
1444 // removeSegment method that returned an iterator.
1445 LR.removeSegment(*LII, true);
1446 if (prevStart.isValid())
1447 LII = LR.find(prevStart);
// Rebase the segment (and its value) on this def's slot.
1451 LII->start = instrIdx.getRegSlot();
1452 LII->valno->def = instrIdx.getRegSlot();
// A subreg def that isn't <undef> also reads the register, so it counts as
// a use for the purposes of extending earlier segments.
1453 if (MO.getSubReg() && !MO.isUndef())
1454 lastUseIdx = instrIdx.getRegSlot();
1456 lastUseIdx = SlotIndex();
1461 if (!lastUseIdx.isValid()) {
// No later use: the def is dead; give it a minimal dead segment.
1462 VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
1463 LiveRange::Segment S(instrIdx.getRegSlot(),
1464 instrIdx.getDeadSlot(), VNI);
1465 LII = LR.addSegment(S);
1466 } else if (LII->start != instrIdx.getRegSlot()) {
// Later use exists but no segment starts here: create one reaching it.
1467 VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
1468 LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
1469 LII = LR.addSegment(S);
1472 if (MO.getSubReg() && !MO.isUndef())
1473 lastUseIdx = instrIdx.getRegSlot();
1475 lastUseIdx = SlotIndex();
1476 } else if (MO.isUse()) {
1477 // FIXME: This should probably be handled outside of this branch,
1478 // either as part of the def case (for defs inside of the region) or
1479 // after the loop over the region.
1480 if (!isEndValid && !LII->end.isBlock())
1481 LII->end = instrIdx.getRegSlot();
1482 if (!lastUseIdx.isValid())
1483 lastUseIdx = instrIdx.getRegSlot();
// Repair the live intervals of OrigRegs (virtual registers only) after
// instructions in [Begin, End) of MBB have been added, removed or modified.
// First anchors the region at already-indexed instructions, repairs the
// SlotIndexes, computes intervals for any newly seen vregs, then repairs each
// original register's subranges and main range.
1490 LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
1491 MachineBasicBlock::iterator Begin,
1492 MachineBasicBlock::iterator End,
1493 ArrayRef<unsigned> OrigRegs) {
1494 // Find anchor points, which are at the beginning/end of blocks or at
1495 // instructions that already have indexes.
1496 while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
1498 while (End != MBB->end() && !Indexes->hasIndex(*End))
// endIdx is the slot just before End (or just before the block end).
1502 if (End == MBB->end())
1503 endIdx = getMBBEndIdx(MBB).getPrevSlot();
1505 endIdx = getInstructionIndex(*End);
1507 Indexes->repairIndexesInRange(MBB, Begin, End);
// Make sure every virtual register used in the region has an interval before
// attempting any repairs.
1509 for (MachineBasicBlock::iterator I = End; I != Begin;) {
1511 MachineInstr &MI = *I;
1512 if (MI.isDebugValue())
1514 for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
1515 MOE = MI.operands_end();
1516 MOI != MOE; ++MOI) {
1518 TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
1519 !hasInterval(MOI->getReg())) {
1520 createAndComputeVirtRegInterval(MOI->getReg());
1525 for (unsigned Reg : OrigRegs) {
1526 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1529 LiveInterval &LI = getInterval(Reg);
1530 // FIXME: Should we support undefs that gain defs?
1531 if (!LI.hasAtLeastOneValue())
// Subranges are repaired individually before the main range.
1534 for (LiveInterval::SubRange &S : LI.subranges())
1535 repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
1537 repairOldRegInRange(Begin, End, endIdx, LI, Reg);
// Remove any value defined at Pos from the live ranges of all register units
// of physical register Reg. Only regunits with a cached (already computed)
// live range are touched.
1541 void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
1542 for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
1543 if (LiveRange *LR = getCachedRegUnit(*Unit))
1544 if (VNInfo *VNI = LR->getVNInfoAt(Pos))
1545 LR->removeValNo(VNI);
// Remove the value defined at Pos from LI's main range and from every
// subrange, dropping any subrange that becomes empty as a result.
1549 void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
1550 // LI may not have the main range computed yet, but its subranges may
1552 VNInfo *VNI = LI.getVNInfoAt(Pos);
1553 if (VNI != nullptr) {
1554 assert(VNI->def.getBaseIndex() == Pos.getBaseIndex());
1555 LI.removeValNo(VNI);
1558 // Also remove the value defined in subranges.
1559 for (LiveInterval::SubRange &S : LI.subranges()) {
// Only remove the subrange value if it is actually defined at Pos, not one
// that is merely live across it.
1560 if (VNInfo *SVNI = S.getVNInfoAt(Pos))
1561 if (SVNI->def.getBaseIndex() == Pos.getBaseIndex())
1562 S.removeValNo(SVNI);
1564 LI.removeEmptySubRanges();
// Split LI into its connected value components: classify the values, create
// one fresh virtual register (of the same register class) and empty interval
// per extra component, and let ConnectedVNInfoEqClasses::Distribute move the
// segments. The new intervals are appended to SplitLIs.
1567 void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
1568 SmallVectorImpl<LiveInterval*> &SplitLIs) {
1569 ConnectedVNInfoEqClasses ConEQ(*this);
1570 unsigned NumComp = ConEQ.Classify(LI);
1573 DEBUG(dbgs() << " Split " << NumComp << " components: " << LI << '\n');
1574 unsigned Reg = LI.reg;
1575 const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
// Component 0 stays in LI itself; components 1..NumComp-1 get new vregs.
1576 for (unsigned I = 1; I < NumComp; ++I) {
1577 unsigned NewVReg = MRI->createVirtualRegister(RegClass);
1578 LiveInterval &NewLI = createEmptyInterval(NewVReg);
1579 SplitLIs.push_back(&NewLI);
1581 ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
1584 void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
1585 assert(LRCalc && "LRCalc not initialized.");
1586 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
1587 LRCalc->constructMainRangeFromSubranges(LI);