From f44f371b7f3fab5683e6781873d71987e44fea2f Mon Sep 17 00:00:00 2001
From: Jim Stichnoth
Date: Wed, 1 Oct 2014 14:05:51 -0700
Subject: [PATCH] Subzero: Auto-awesome iterators.

Use C++11 'auto' where practical to make iteration more concise.
Use C++11 range-based for loops where possible.

BUG= none
R=jfb@chromium.org, kschimpf@google.com

Review URL: https://codereview.chromium.org/619893002
---
 src/IceCfg.cpp                 | 111 +++++++++-----------------
 src/IceCfgNode.cpp             | 172 +++++++++++++++++------------------------
 src/IceConverter.cpp           |  28 +++----
 src/IceGlobalContext.cpp       |  12 +--
 src/IceInst.cpp                |   5 +-
 src/IceIntrinsics.cpp          |   6 +-
 src/IceIntrinsics.h            |   2 +-
 src/IceOperand.cpp             |  49 +++++-------
 src/IceRegAlloc.cpp            |  94 ++++++++--------------
 src/IceTargetLowering.h        |   7 +-
 src/IceTargetLoweringX8632.cpp |  37 +++------
 src/IceTargetLoweringX8632.h   |   6 +-
 src/IceTimerTree.cpp           |   4 +-
 src/IceTranslator.cpp          |  14 +---
 src/IceTypeConverter.h         |   2 +-
 src/PNaClTranslator.cpp        |  10 +--
 16 files changed, 210 insertions(+), 349 deletions(-)

diff --git a/src/IceCfg.cpp b/src/IceCfg.cpp
index b4c7aca5e..1134fdc70 100644
--- a/src/IceCfg.cpp
+++ b/src/IceCfg.cpp
@@ -82,9 +82,8 @@ void Cfg::translate() {
 }
 
 void Cfg::computePredecessors() {
-  for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
-    (*I)->computePredecessors();
-  }
+  for (CfgNode *Node : Nodes)
+    Node->computePredecessors();
 }
 
 void Cfg::renumberInstructions() {
@@ -92,18 +91,16 @@ void Cfg::renumberInstructions() {
       GlobalContext::getTimerID("renumberInstructions");
   TimerMarker T(IDrenumberInstructions, getContext());
   NextInstNumber = 1;
-  for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
-    (*I)->renumberInstructions();
-  }
+  for (CfgNode *Node : Nodes)
+    Node->renumberInstructions();
 }
 
 // placePhiLoads() must be called before placePhiStores().
 void Cfg::placePhiLoads() {
   static TimerIdT IDplacePhiLoads = GlobalContext::getTimerID("placePhiLoads");
   TimerMarker T(IDplacePhiLoads, getContext());
-  for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
-    (*I)->placePhiLoads();
-  }
+  for (CfgNode *Node : Nodes)
+    Node->placePhiLoads();
 }
 
 // placePhiStores() must be called after placePhiLoads().
@@ -111,17 +108,15 @@ void Cfg::placePhiStores() { static TimerIdT IDplacePhiStores = GlobalContext::getTimerID("placePhiStores"); TimerMarker T(IDplacePhiStores, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->placePhiStores(); - } + for (CfgNode *Node : Nodes) + Node->placePhiStores(); } void Cfg::deletePhis() { static TimerIdT IDdeletePhis = GlobalContext::getTimerID("deletePhis"); TimerMarker T(IDdeletePhis, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->deletePhis(); - } + for (CfgNode *Node : Nodes) + Node->deletePhis(); } void Cfg::doArgLowering() { @@ -133,26 +128,23 @@ void Cfg::doArgLowering() { void Cfg::doAddressOpt() { static TimerIdT IDdoAddressOpt = GlobalContext::getTimerID("doAddressOpt"); TimerMarker T(IDdoAddressOpt, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->doAddressOpt(); - } + for (CfgNode *Node : Nodes) + Node->doAddressOpt(); } void Cfg::doNopInsertion() { static TimerIdT IDdoNopInsertion = GlobalContext::getTimerID("doNopInsertion"); TimerMarker T(IDdoNopInsertion, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->doNopInsertion(); - } + for (CfgNode *Node : Nodes) + Node->doNopInsertion(); } void Cfg::genCode() { static TimerIdT IDgenCode = GlobalContext::getTimerID("genCode"); TimerMarker T(IDgenCode, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->genCode(); - } + for (CfgNode *Node : Nodes) + Node->genCode(); } // Compute the stack frame layout. @@ -163,11 +155,9 @@ void Cfg::genFrame() { // TODO: Consider folding epilog generation into the final // emission/assembly pass to avoid an extra iteration over the node // list. Or keep a separate list of exit nodes. - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - CfgNode *Node = *I; + for (CfgNode *Node : Nodes) if (Node->getHasReturn()) getTarget()->addEpilog(Node); - } } // This is a lightweight version of live-range-end calculation. Marks @@ -179,9 +169,8 @@ void Cfg::livenessLightweight() { GlobalContext::getTimerID("livenessLightweight"); TimerMarker T(IDlivenessLightweight, getContext()); getVMetadata()->init(); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->livenessLightweight(); - } + for (CfgNode *Node : Nodes) + Node->livenessLightweight(); } void Cfg::liveness(LivenessMode Mode) { @@ -194,8 +183,8 @@ void Cfg::liveness(LivenessMode Mode) { llvm::BitVector NeedToProcess(Nodes.size(), true); while (NeedToProcess.any()) { // Iterate in reverse topological order to speed up convergence. - for (NodeList::reverse_iterator I = Nodes.rbegin(), E = Nodes.rend(); - I != E; ++I) { + // TODO(stichnot): Use llvm::make_range with LLVM 3.5. + for (auto I = Nodes.rbegin(), E = Nodes.rend(); I != E; ++I) { CfgNode *Node = *I; if (NeedToProcess[Node->getIndex()]) { NeedToProcess[Node->getIndex()] = false; @@ -203,24 +192,16 @@ void Cfg::liveness(LivenessMode Mode) { if (Changed) { // If the beginning-of-block liveness changed since the last // iteration, mark all in-edges as needing to be processed. 
- const NodeList &InEdges = Node->getInEdges(); - for (NodeList::const_iterator I1 = InEdges.begin(), - E1 = InEdges.end(); - I1 != E1; ++I1) { - CfgNode *Pred = *I1; + for (CfgNode *Pred : Node->getInEdges()) NeedToProcess[Pred->getIndex()] = true; - } } } } } if (Mode == Liveness_Intervals) { // Reset each variable's live range. - for (VarList::const_iterator I = Variables.begin(), E = Variables.end(); - I != E; ++I) { - if (Variable *Var = *I) - Var->resetLiveRange(); - } + for (Variable *Var : Variables) + Var->resetLiveRange(); } // Collect timing for just the portion that constructs the live // range intervals based on the end-of-live-range computation, for a @@ -229,9 +210,8 @@ void Cfg::liveness(LivenessMode Mode) { // and build each Variable's live range. static TimerIdT IDliveRange = GlobalContext::getTimerID("liveRange"); TimerMarker T1(IDliveRange, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - (*I)->livenessPostprocess(Mode, getLiveness()); - } + for (CfgNode *Node : Nodes) + Node->livenessPostprocess(Mode, getLiveness()); if (Mode == Liveness_Intervals) { // Special treatment for live in-args. Their liveness needs to // extend beyond the beginning of the function, otherwise an arg @@ -280,13 +260,8 @@ bool Cfg::validateLiveness() const { TimerMarker T(IDvalidateLiveness, getContext()); bool Valid = true; Ostream &Str = Ctx->getStrDump(); - for (NodeList::const_iterator I1 = Nodes.begin(), E1 = Nodes.end(); I1 != E1; - ++I1) { - CfgNode *Node = *I1; - InstList &Insts = Node->getInsts(); - for (InstList::const_iterator I2 = Insts.begin(), E2 = Insts.end(); - I2 != E2; ++I2) { - Inst *Inst = *I2; + for (CfgNode *Node : Nodes) { + for (Inst *Inst : Node->getInsts()) { if (Inst->isDeleted()) continue; if (llvm::isa(Inst)) @@ -327,8 +302,8 @@ bool Cfg::validateLiveness() const { void Cfg::doBranchOpt() { static TimerIdT IDdoBranchOpt = GlobalContext::getTimerID("doBranchOpt"); TimerMarker T(IDdoBranchOpt, getContext()); - for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { - NodeList::iterator NextNode = I; + for (auto I = Nodes.begin(), E = Nodes.end(); I != E; ++I) { + auto NextNode = I; ++NextNode; (*I)->doBranchOpt(NextNode == E ? 
NULL : *NextNode); } @@ -360,16 +335,11 @@ void Cfg::emit() { Str << "\t.type\t" << MangledName << ",@function\n"; } Str << "\t.p2align " << getTarget()->getBundleAlignLog2Bytes() << ",0x"; - llvm::ArrayRef Pad = getTarget()->getNonExecBundlePadding(); - for (llvm::ArrayRef::iterator I = Pad.begin(), E = Pad.end(); - I != E; ++I) { - Str.write_hex(*I); - } + for (AsmCodeByte I : getTarget()->getNonExecBundlePadding()) + Str.write_hex(I); Str << "\n"; - for (NodeList::const_iterator I = Nodes.begin(), E = Nodes.end(); I != E; - ++I) { - (*I)->emit(this); - } + for (CfgNode *Node : Nodes) + Node->emit(this); Str << "\n"; } @@ -398,9 +368,7 @@ void Cfg::dump(const IceString &Message) { resetCurrentNode(); if (getContext()->isVerbose(IceV_Liveness)) { // Print summary info about variables - for (VarList::const_iterator I = Variables.begin(), E = Variables.end(); - I != E; ++I) { - Variable *Var = *I; + for (Variable *Var : Variables) { Str << "// multiblock="; if (getVMetadata()->isTracked(Var)) Str << getVMetadata()->isMultiBlock(Var); @@ -412,13 +380,10 @@ void Cfg::dump(const IceString &Message) { } } // Print each basic block - for (NodeList::const_iterator I = Nodes.begin(), E = Nodes.end(); I != E; - ++I) { - (*I)->dump(this); - } - if (getContext()->isVerbose(IceV_Instructions)) { + for (CfgNode *Node : Nodes) + Node->dump(this); + if (getContext()->isVerbose(IceV_Instructions)) Str << "}\n"; - } } } // end of namespace Ice diff --git a/src/IceCfgNode.cpp b/src/IceCfgNode.cpp index 011b04dd0..1a386c8e6 100644 --- a/src/IceCfgNode.cpp +++ b/src/IceCfgNode.cpp @@ -55,14 +55,10 @@ void CfgNode::appendInst(Inst *Inst) { // instruction numbers in a block, from lowest to highest, must not // overlap with the range of any other block. void CfgNode::renumberInstructions() { - for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - (*I)->renumber(Func); - } - InstList::const_iterator I = Insts.begin(), E = Insts.end(); - while (I != E) { - Inst *Inst = *I++; - Inst->renumber(Func); - } + for (InstPhi *I : Phis) + I->renumber(Func); + for (Inst *I : Insts) + I->renumber(Func); } // When a node is created, the OutEdges are immediately knows, but the @@ -71,11 +67,8 @@ void CfgNode::renumberInstructions() { // creating the InEdges list. void CfgNode::computePredecessors() { OutEdges = (*Insts.rbegin())->getTerminatorEdges(); - for (NodeList::const_iterator I = OutEdges.begin(), E = OutEdges.end(); - I != E; ++I) { - CfgNode *Node = *I; - Node->InEdges.push_back(this); - } + for (CfgNode *Succ : OutEdges) + Succ->InEdges.push_back(this); } // This does part 1 of Phi lowering, by creating a new dest variable @@ -90,10 +83,8 @@ void CfgNode::computePredecessors() { // instructions and appends assignment instructions to predecessor // blocks. Note that this transformation preserves SSA form. void CfgNode::placePhiLoads() { - for (PhiList::iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - Inst *Inst = (*I)->lower(Func); - Insts.insert(Insts.begin(), Inst); - } + for (InstPhi *I : Phis) + Insts.insert(Insts.begin(), I->lower(Func)); } // This does part 2 of Phi lowering. For each Phi instruction at each @@ -188,16 +179,12 @@ void CfgNode::placePhiStores() { } // Consider every out-edge. - for (NodeList::const_iterator I1 = OutEdges.begin(), E1 = OutEdges.end(); - I1 != E1; ++I1) { - CfgNode *Target = *I1; + for (CfgNode *Succ : OutEdges) { // Consider every Phi instruction at the out-edge. 
- for (PhiList::const_iterator I2 = Target->Phis.begin(), - E2 = Target->Phis.end(); - I2 != E2; ++I2) { - Operand *Operand = (*I2)->getOperandForTarget(this); + for (InstPhi *I : Succ->Phis) { + Operand *Operand = I->getOperandForTarget(this); assert(Operand); - Variable *Dest = (*I2)->getDest(); + Variable *Dest = I->getDest(); assert(Dest); InstAssign *NewInst = InstAssign::create(Func, Dest, Operand); if (CmpInstDest == Operand) @@ -210,9 +197,8 @@ void CfgNode::placePhiStores() { // Deletes the phi instructions after the loads and stores are placed. void CfgNode::deletePhis() { - for (PhiList::iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - (*I)->setDeleted(); - } + for (InstPhi *I : Phis) + I->setDeleted(); } // Does address mode optimization. Pass each instruction to the @@ -267,16 +253,16 @@ void CfgNode::livenessLightweight() { SizeT NumVars = Func->getNumVariables(); llvm::BitVector Live(NumVars); // Process regular instructions in reverse order. - for (InstList::const_reverse_iterator I = Insts.rbegin(), E = Insts.rend(); - I != E; ++I) { + // TODO(stichnot): Use llvm::make_range with LLVM 3.5. + for (auto I = Insts.rbegin(), E = Insts.rend(); I != E; ++I) { if ((*I)->isDeleted()) continue; (*I)->livenessLightweight(Func, Live); } - for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - if ((*I)->isDeleted()) + for (InstPhi *I : Phis) { + if (I->isDeleted()) continue; - (*I)->livenessLightweight(Func, Live); + I->livenessLightweight(Func, Live); } } @@ -295,21 +281,17 @@ bool CfgNode::liveness(Liveness *Liveness) { LiveBegin.assign(NumVars, Sentinel); LiveEnd.assign(NumVars, Sentinel); // Initialize Live to be the union of all successors' LiveIn. - for (NodeList::const_iterator I = OutEdges.begin(), E = OutEdges.end(); - I != E; ++I) { - CfgNode *Succ = *I; + for (CfgNode *Succ : OutEdges) { Live |= Liveness->getLiveIn(Succ); // Mark corresponding argument of phis in successor as live. - for (PhiList::const_iterator I1 = Succ->Phis.begin(), E1 = Succ->Phis.end(); - I1 != E1; ++I1) { - (*I1)->livenessPhiOperand(Live, this, Liveness); - } + for (InstPhi *I : Succ->Phis) + I->livenessPhiOperand(Live, this, Liveness); } Liveness->getLiveOut(this) = Live; // Process regular instructions in reverse order. - for (InstList::const_reverse_iterator I = Insts.rbegin(), E = Insts.rend(); - I != E; ++I) { + // TODO(stichnot): Use llvm::make_range with LLVM 3.5. + for (auto I = Insts.rbegin(), E = Insts.rend(); I != E; ++I) { if ((*I)->isDeleted()) continue; (*I)->liveness((*I)->getNumber(), Live, Liveness, this); @@ -318,12 +300,12 @@ bool CfgNode::liveness(Liveness *Liveness) { // instruction number to be that of the earliest phi instruction in // the block. InstNumberT FirstPhiNumber = Inst::NumberSentinel; - for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - if ((*I)->isDeleted()) + for (InstPhi *I : Phis) { + if (I->isDeleted()) continue; if (FirstPhiNumber == Inst::NumberSentinel) - FirstPhiNumber = (*I)->getNumber(); - (*I)->liveness(FirstPhiNumber, Live, Liveness, this); + FirstPhiNumber = I->getNumber(); + I->liveness(FirstPhiNumber, Live, Liveness, this); } // When using the sparse representation, after traversing the @@ -376,36 +358,33 @@ void CfgNode::livenessPostprocess(LivenessMode Mode, Liveness *Liveness) { InstNumberT FirstInstNum = Inst::NumberSentinel; InstNumberT LastInstNum = Inst::NumberSentinel; // Process phis in any order. Process only Dest operands. 
- for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - InstPhi *Inst = *I; - Inst->deleteIfDead(); - if (Inst->isDeleted()) + for (InstPhi *I : Phis) { + I->deleteIfDead(); + if (I->isDeleted()) continue; if (FirstInstNum == Inst::NumberSentinel) - FirstInstNum = Inst->getNumber(); - assert(Inst->getNumber() > LastInstNum); - LastInstNum = Inst->getNumber(); + FirstInstNum = I->getNumber(); + assert(I->getNumber() > LastInstNum); + LastInstNum = I->getNumber(); } // Process instructions - for (InstList::const_iterator I = Insts.begin(), E = Insts.end(); I != E; - ++I) { - Inst *Inst = *I; - Inst->deleteIfDead(); - if (Inst->isDeleted()) + for (Inst *I : Insts) { + I->deleteIfDead(); + if (I->isDeleted()) continue; if (FirstInstNum == Inst::NumberSentinel) - FirstInstNum = Inst->getNumber(); - assert(Inst->getNumber() > LastInstNum); - LastInstNum = Inst->getNumber(); + FirstInstNum = I->getNumber(); + assert(I->getNumber() > LastInstNum); + LastInstNum = I->getNumber(); // Create fake live ranges for a Kill instruction, but only if the // linked instruction is still alive. if (Mode == Liveness_Intervals) { - if (InstFakeKill *Kill = llvm::dyn_cast(Inst)) { + if (InstFakeKill *Kill = llvm::dyn_cast(I)) { if (!Kill->getLinked()->isDeleted()) { - SizeT NumSrcs = Inst->getSrcSize(); - for (SizeT i = 0; i < NumSrcs; ++i) { - Variable *Var = llvm::cast(Inst->getSrc(i)); - InstNumberT InstNumber = Inst->getNumber(); + SizeT NumSrcs = I->getSrcSize(); + for (SizeT Src = 0; Src < NumSrcs; ++Src) { + Variable *Var = llvm::cast(I->getSrc(Src)); + InstNumberT InstNumber = I->getNumber(); Liveness->addLiveRange(Var, InstNumber, InstNumber, 1); } } @@ -454,10 +433,8 @@ void CfgNode::doBranchOpt(const CfgNode *NextNode) { // first opportunity, unless there is some target lowering where we // have the possibility of multiple such optimizations per block // (currently not the case for x86 lowering). - for (InstList::const_iterator I = Insts.begin(), E = Insts.end(); I != E; - ++I) { - Target->doBranchOpt(*I, NextNode); - } + for (Inst *I : Insts) + Target->doBranchOpt(I, NextNode); } // ======================== Dump routines ======================== // @@ -469,38 +446,35 @@ void CfgNode::emit(Cfg *Func) const { Str << Func->getContext()->mangleName(Func->getFunctionName()) << ":\n"; } Str << getAsmName() << ":\n"; - for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E; ++I) { - InstPhi *Phi = *I; + for (InstPhi *Phi : Phis) { if (Phi->isDeleted()) continue; // Emitting a Phi instruction should cause an error. Inst *Instr = Phi; Instr->emit(Func); } - for (InstList::const_iterator I = Insts.begin(), E = Insts.end(); I != E; - ++I) { - Inst *Inst = *I; - if (Inst->isDeleted()) + for (Inst *I : Insts) { + if (I->isDeleted()) continue; // Here we detect redundant assignments like "mov eax, eax" and // suppress them. - if (Inst->isRedundantAssign()) + if (I->isRedundantAssign()) continue; if (Func->UseIntegratedAssembler()) { - (*I)->emitIAS(Func); + I->emitIAS(Func); } else { - (*I)->emit(Func); + I->emit(Func); } // Update emitted instruction count, plus fill/spill count for // Variable operands without a physical register. 
- if (uint32_t Count = (*I)->getEmitInstCount()) { + if (uint32_t Count = I->getEmitInstCount()) { Func->getContext()->statsUpdateEmitted(Count); - if (Variable *Dest = (*I)->getDest()) { + if (Variable *Dest = I->getDest()) { if (!Dest->hasReg()) Func->getContext()->statsUpdateFills(); } - for (SizeT S = 0; S < (*I)->getSrcSize(); ++S) { - if (Variable *Src = llvm::dyn_cast((*I)->getSrc(S))) { + for (SizeT S = 0; S < I->getSrcSize(); ++S) { + if (Variable *Src = llvm::dyn_cast(I->getSrc(S))) { if (!Src->hasReg()) Func->getContext()->statsUpdateSpills(); } @@ -519,11 +493,12 @@ void CfgNode::dump(Cfg *Func) const { // Dump list of predecessor nodes. if (Func->getContext()->isVerbose(IceV_Preds) && !InEdges.empty()) { Str << " // preds = "; - for (NodeList::const_iterator I = InEdges.begin(), E = InEdges.end(); - I != E; ++I) { - if (I != InEdges.begin()) + bool First = true; + for (CfgNode *I : InEdges) { + if (First) Str << ", "; - Str << "%" << (*I)->getName(); + First = false; + Str << "%" << I->getName(); } Str << "\n"; } @@ -542,16 +517,10 @@ void CfgNode::dump(Cfg *Func) const { } // Dump each instruction. if (Func->getContext()->isVerbose(IceV_Instructions)) { - for (PhiList::const_iterator I = Phis.begin(), E = Phis.end(); I != E; - ++I) { - const Inst *Inst = *I; - Inst->dumpDecorated(Func); - } - InstList::const_iterator I = Insts.begin(), E = Insts.end(); - while (I != E) { - Inst *Inst = *I++; - Inst->dumpDecorated(Func); - } + for (InstPhi *I : Phis) + I->dumpDecorated(Func); + for (Inst *I : Insts) + I->dumpDecorated(Func); } // Dump the live-out variables. llvm::BitVector LiveOut; @@ -569,11 +538,12 @@ void CfgNode::dump(Cfg *Func) const { // Dump list of successor nodes. if (Func->getContext()->isVerbose(IceV_Succs)) { Str << " // succs = "; - for (NodeList::const_iterator I = OutEdges.begin(), E = OutEdges.end(); - I != E; ++I) { - if (I != OutEdges.begin()) + bool First = true; + for (CfgNode *I : OutEdges) { + if (First) Str << ", "; - Str << "%" << (*I)->getName(); + First = false; + Str << "%" << I->getName(); } Str << "\n"; } diff --git a/src/IceConverter.cpp b/src/IceConverter.cpp index ca8a96a61..df64cff9a 100644 --- a/src/IceConverter.cpp +++ b/src/IceConverter.cpp @@ -70,9 +70,8 @@ public: Func->setInternal(F->hasInternalLinkage()); // The initial definition/use of each arg is the entry node. - for (Function::const_arg_iterator ArgI = F->arg_begin(), - ArgE = F->arg_end(); - ArgI != ArgE; ++ArgI) { + for (auto ArgI = F->arg_begin(), ArgE = F->arg_end(); ArgI != ArgE; + ++ArgI) { Func->addArg(mapValueToIceVar(ArgI)); } @@ -80,14 +79,10 @@ public: // blocks in the original linearized order. Otherwise the ICE // linearized order will be affected by branch targets in // terminator instructions. 
- for (Function::const_iterator BBI = F->begin(), BBE = F->end(); BBI != BBE; - ++BBI) { - mapBasicBlockToNode(BBI); - } - for (Function::const_iterator BBI = F->begin(), BBE = F->end(); BBI != BBE; - ++BBI) { - convertBasicBlock(BBI); - } + for (const BasicBlock &BBI : *F) + mapBasicBlockToNode(&BBI); + for (const BasicBlock &BBI : *F) + convertBasicBlock(&BBI); Func->setEntryNode(mapBasicBlockToNode(&F->getEntryBlock())); Func->computePredecessors(); @@ -564,9 +559,8 @@ private: Ice::CfgNode *convertBasicBlock(const BasicBlock *BB) { Ice::CfgNode *Node = mapBasicBlockToNode(BB); - for (BasicBlock::const_iterator II = BB->begin(), II_e = BB->end(); - II != II_e; ++II) { - Ice::Inst *Inst = convertInstruction(II); + for (const Instruction &II : *BB) { + Ice::Inst *Inst = convertInstruction(&II); Node->appendInst(Inst); } return Node; @@ -632,12 +626,12 @@ void Converter::convertToIce() { } void Converter::convertFunctions() { - for (Module::const_iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) { - if (I->empty()) + for (const Function &I : *Mod) { + if (I.empty()) continue; LLVM2ICEConverter FunctionConverter(Ctx, Mod->getContext()); - Cfg *Fcn = FunctionConverter.convertFunction(I); + Cfg *Fcn = FunctionConverter.convertFunction(&I); translateFcn(Fcn); } diff --git a/src/IceGlobalContext.cpp b/src/IceGlobalContext.cpp index 251635f39..c2262ad31 100644 --- a/src/IceGlobalContext.cpp +++ b/src/IceGlobalContext.cpp @@ -42,7 +42,7 @@ public: TypePool() : NextPoolID(0) {} ValueType *getOrAdd(GlobalContext *Ctx, Type Ty, KeyType Key) { TupleType TupleKey = std::make_pair(Ty, Key); - typename ContainerType::const_iterator Iter = Pool.find(TupleKey); + auto Iter = Pool.find(TupleKey); if (Iter != Pool.end()) return Iter->second; ValueType *Result = ValueType::create(Ctx, Ty, Key, NextPoolID++); @@ -52,12 +52,8 @@ public: ConstantList getConstantPool() const { ConstantList Constants; Constants.reserve(Pool.size()); - // TODO: replace the loop with std::transform + lambdas. 
- for (typename ContainerType::const_iterator I = Pool.begin(), - E = Pool.end(); - I != E; ++I) { - Constants.push_back(I->second); - } + for (auto &I : Pool) + Constants.push_back(I.second); return Constants; } @@ -86,7 +82,7 @@ public: UndefPool() : NextPoolID(0) {} ConstantUndef *getOrAdd(GlobalContext *Ctx, Type Ty) { - ContainerType::iterator I = Pool.find(Ty); + auto I = Pool.find(Ty); if (I != Pool.end()) return I->second; ConstantUndef *Undef = ConstantUndef::create(Ctx, Ty, NextPoolID++); diff --git a/src/IceInst.cpp b/src/IceInst.cpp index 9602935a4..57b0bd1ec 100644 --- a/src/IceInst.cpp +++ b/src/IceInst.cpp @@ -420,11 +420,8 @@ InstFakeKill::InstFakeKill(Cfg *Func, const VarList &KilledRegs, const Inst *Linked) : InstHighLevel(Func, Inst::FakeKill, KilledRegs.size(), NULL), Linked(Linked) { - for (VarList::const_iterator I = KilledRegs.begin(), E = KilledRegs.end(); - I != E; ++I) { - Variable *Var = *I; + for (Variable *Var : KilledRegs) addSource(Var); - } } // ======================== Dump routines ======================== // diff --git a/src/IceIntrinsics.cpp b/src/IceIntrinsics.cpp index 6e2448dd6..2c97809a1 100644 --- a/src/IceIntrinsics.cpp +++ b/src/IceIntrinsics.cpp @@ -207,7 +207,7 @@ Intrinsics::Intrinsics() { for (size_t I = 0; I < IceIntrinsicsTableSize; ++I) { const struct IceIntrinsicsEntry_ &Entry = IceIntrinsicsTable[I]; assert(Entry.Info.NumTypes <= kMaxIntrinsicParameters); - map.insert(std::make_pair(IceString(Entry.IntrinsicName), Entry.Info)); + Map.insert(std::make_pair(IceString(Entry.IntrinsicName), Entry.Info)); } } @@ -215,8 +215,8 @@ Intrinsics::~Intrinsics() {} const Intrinsics::FullIntrinsicInfo * Intrinsics::find(const IceString &Name) const { - IntrinsicMap::const_iterator it = map.find(Name); - if (it == map.end()) + auto it = Map.find(Name); + if (it == Map.end()) return NULL; return &it->second; } diff --git a/src/IceIntrinsics.h b/src/IceIntrinsics.h index bd0f118cb..7931539e6 100644 --- a/src/IceIntrinsics.h +++ b/src/IceIntrinsics.h @@ -157,7 +157,7 @@ public: private: // TODO(jvoung): May want to switch to something like LLVM's StringMap. typedef std::map IntrinsicMap; - IntrinsicMap map; + IntrinsicMap Map; Intrinsics(const Intrinsics &) LLVM_DELETED_FUNCTION; Intrinsics &operator=(const Intrinsics &) LLVM_DELETED_FUNCTION; diff --git a/src/IceOperand.cpp b/src/IceOperand.cpp index 361472595..26ef83089 100644 --- a/src/IceOperand.cpp +++ b/src/IceOperand.cpp @@ -123,13 +123,12 @@ bool LiveRange::overlaps(const LiveRange &Other) const { bool LiveRange::overlaps(InstNumberT OtherBegin) const { bool Result = false; - for (RangeType::const_iterator I = Range.begin(), E = Range.end(); I != E; - ++I) { - if (OtherBegin < I->first) { + for (const RangeElementType &I : Range) { + if (OtherBegin < I.first) { Result = false; break; } - if (OtherBegin < I->second) { + if (OtherBegin < I.second) { Result = true; break; } @@ -148,9 +147,8 @@ bool LiveRange::overlaps(InstNumberT OtherBegin) const { // number. This is only used for validating the live range // calculation. bool LiveRange::containsValue(InstNumberT Value) const { - for (RangeType::const_iterator I = Range.begin(), E = Range.end(); I != E; - ++I) { - if (I->first <= Value && Value <= I->second) + for (const RangeElementType &I : Range) { + if (I.first <= Value && Value <= I.second) return true; } return false; @@ -282,11 +280,7 @@ void VariablesMetadata::init() { Metadata.resize(Func->getNumVariables()); // Mark implicit args as being used in the entry node. 
- const VarList &ImplicitArgList = Func->getImplicitArgs(); - for (VarList::const_iterator I = ImplicitArgList.begin(), - E = ImplicitArgList.end(); - I != E; ++I) { - const Variable *Var = *I; + for (Variable *Var : Func->getImplicitArgs()) { const Inst *NoInst = NULL; const CfgNode *EntryNode = Func->getEntryNode(); const bool IsFromDef = false; @@ -297,30 +291,28 @@ void VariablesMetadata::init() { SizeT NumNodes = Func->getNumNodes(); for (SizeT N = 0; N < NumNodes; ++N) { CfgNode *Node = Func->getNodes()[N]; - const InstList &Insts = Node->getInsts(); - for (InstList::const_iterator I = Insts.begin(), E = Insts.end(); I != E; - ++I) { - if ((*I)->isDeleted()) + for (Inst *I : Node->getInsts()) { + if (I->isDeleted()) continue; - if (InstFakeKill *Kill = llvm::dyn_cast(*I)) { + if (InstFakeKill *Kill = llvm::dyn_cast(I)) { // A FakeKill instruction indicates certain Variables (usually // physical scratch registers) are redefined, so we register // them as defs. - for (SizeT SrcNum = 0; SrcNum < (*I)->getSrcSize(); ++SrcNum) { - Variable *Var = llvm::cast((*I)->getSrc(SrcNum)); + for (SizeT SrcNum = 0; SrcNum < I->getSrcSize(); ++SrcNum) { + Variable *Var = llvm::cast(I->getSrc(SrcNum)); SizeT VarNum = Var->getIndex(); assert(VarNum < Metadata.size()); Metadata[VarNum].markDef(Kill, Node); } continue; // no point in executing the rest } - if (Variable *Dest = (*I)->getDest()) { + if (Variable *Dest = I->getDest()) { SizeT DestNum = Dest->getIndex(); assert(DestNum < Metadata.size()); - Metadata[DestNum].markDef(*I, Node); + Metadata[DestNum].markDef(I, Node); } - for (SizeT SrcNum = 0; SrcNum < (*I)->getSrcSize(); ++SrcNum) { - Operand *Src = (*I)->getSrc(SrcNum); + for (SizeT SrcNum = 0; SrcNum < I->getSrcSize(); ++SrcNum) { + Operand *Src = I->getSrc(SrcNum); SizeT NumVars = Src->getNumVars(); for (SizeT J = 0; J < NumVars; ++J) { const Variable *Var = Src->getVar(J); @@ -328,7 +320,7 @@ void VariablesMetadata::init() { assert(VarNum < Metadata.size()); const bool IsFromDef = false; const bool IsImplicit = false; - Metadata[VarNum].markUse(*I, Node, IsFromDef, IsImplicit); + Metadata[VarNum].markUse(I, Node, IsFromDef, IsImplicit); } } } @@ -440,11 +432,12 @@ void ConstantRelocatable::dump(const Cfg *, Ostream &Str) const { void LiveRange::dump(Ostream &Str) const { Str << "(weight=" << Weight << ") "; - for (RangeType::const_iterator I = Range.begin(), E = Range.end(); I != E; - ++I) { - if (I != Range.begin()) + bool First = true; + for (const RangeElementType &I : Range) { + if (First) Str << ", "; - Str << "[" << (*I).first << ":" << (*I).second << ")"; + First = false; + Str << "[" << I.first << ":" << I.second << ")"; } } diff --git a/src/IceRegAlloc.cpp b/src/IceRegAlloc.cpp index e072085e8..3a4c17831 100644 --- a/src/IceRegAlloc.cpp +++ b/src/IceRegAlloc.cpp @@ -88,9 +88,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { static TimerIdT IDinitUnhandled = GlobalContext::getTimerID("initUnhandled"); TimerMarker T(IDinitUnhandled, Func->getContext()); - for (VarList::const_iterator I = Vars.begin(), E = Vars.end(); I != E; - ++I) { - Variable *Var = *I; + for (Variable *Var : Vars) { // Explicitly don't consider zero-weight variables, which are // meant to be spill slots. if (Var->getWeight() == RegWeight::Zero) @@ -151,8 +149,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { } // Check for active ranges that have expired or become inactive. 
- for (UnorderedRanges::iterator I = Active.begin(), E = Active.end(); I != E; - I = Next) { + for (auto I = Active.begin(), E = Active.end(); I != E; I = Next) { Next = I; ++Next; LiveRangeWrapper Item = *I; @@ -188,8 +185,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { } // Check for inactive ranges that have expired or reactivated. - for (UnorderedRanges::iterator I = Inactive.begin(), E = Inactive.end(); - I != E; I = Next) { + for (auto I = Inactive.begin(), E = Inactive.end(); I != E; I = Next) { Next = I; ++Next; LiveRangeWrapper Item = *I; @@ -280,10 +276,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { // Remove registers from the Free[] list where an Inactive range // overlaps with the current range. - for (UnorderedRanges::const_iterator I = Inactive.begin(), - E = Inactive.end(); - I != E; ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Inactive) { if (Item.overlaps(Cur)) { int32_t RegNum = Item.Var->getRegNumTmp(); // Don't assert(Free[RegNum]) because in theory (though @@ -304,9 +297,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { // Disable AllowOverlap if an Active variable, which is not // Prefer, shares Prefer's register, and has a definition within // Cur's live range. - for (UnorderedRanges::iterator I = Active.begin(), E = Active.end(); - AllowOverlap && I != E; ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Active) { int32_t RegNum = Item.Var->getRegNumTmp(); if (Item.Var != Prefer && RegNum == PreferReg && overlapsDefs(Func, Cur, Item.Var)) { @@ -317,14 +308,13 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { // Remove registers from the Free[] list where an Unhandled range // overlaps with the current range and is precolored. - // Cur.endsBefore(*I) is an early exit check that turns a + // Cur.endsBefore(Item) is an early exit check that turns a // guaranteed O(N^2) algorithm into expected linear complexity. llvm::SmallBitVector PrecoloredUnhandled(RegMask.size()); // Note: PrecoloredUnhandled is only used for dumping. - for (OrderedRanges::const_iterator I = Unhandled.begin(), - E = Unhandled.end(); - I != E && !Cur.endsBefore(*I); ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Unhandled) { + if (Cur.endsBefore(Item)) + break; if (Item.Var->hasReg() && Item.overlaps(Cur)) { int32_t ItemReg = Item.Var->getRegNum(); // Note: not getRegNumTmp() Free[ItemReg] = false; @@ -381,19 +371,14 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { // lowest-weight register and see if Cur has higher weight. std::vector Weights(RegMask.size()); // Check Active ranges. - for (UnorderedRanges::const_iterator I = Active.begin(), E = Active.end(); - I != E; ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Active) { assert(Item.overlaps(Cur)); int32_t RegNum = Item.Var->getRegNumTmp(); assert(Item.Var->hasRegTmp()); Weights[RegNum].addWeight(Item.range().getWeight()); } // Same as above, but check Inactive ranges instead of Active. - for (UnorderedRanges::const_iterator I = Inactive.begin(), - E = Inactive.end(); - I != E; ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Inactive) { int32_t RegNum = Item.Var->getRegNumTmp(); assert(Item.Var->hasRegTmp()); if (Item.overlaps(Cur)) @@ -402,10 +387,9 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { // Check Unhandled ranges that overlap Cur and are precolored. 
// Cur.endsBefore(*I) is an early exit check that turns a // guaranteed O(N^2) algorithm into expected linear complexity. - for (OrderedRanges::const_iterator I = Unhandled.begin(), - E = Unhandled.end(); - I != E && !Cur.endsBefore(*I); ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Unhandled) { + if (Cur.endsBefore(Item)) + break; int32_t RegNum = Item.Var->getRegNumTmp(); if (RegNum < 0) continue; @@ -436,8 +420,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { } else { // Evict all live ranges in Active that register number // MinWeightIndex is assigned to. - for (UnorderedRanges::iterator I = Active.begin(), E = Active.end(); - I != E; I = Next) { + for (auto I = Active.begin(), E = Active.end(); I != E; I = Next) { Next = I; ++Next; LiveRangeWrapper Item = *I; @@ -455,8 +438,7 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { } } // Do the same for Inactive. - for (UnorderedRanges::iterator I = Inactive.begin(), E = Inactive.end(); - I != E; I = Next) { + for (auto I = Inactive.begin(), E = Inactive.end(); I != E; I = Next) { Next = I; ++Next; LiveRangeWrapper Item = *I; @@ -496,26 +478,16 @@ void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) { dump(Func); } // Move anything Active or Inactive to Handled for easier handling. - for (UnorderedRanges::iterator I = Active.begin(), E = Active.end(); I != E; - I = Next) { - Next = I; - ++Next; - Handled.push_back(*I); - Active.erase(I); - } - for (UnorderedRanges::iterator I = Inactive.begin(), E = Inactive.end(); - I != E; I = Next) { - Next = I; - ++Next; - Handled.push_back(*I); - Inactive.erase(I); - } + for (const LiveRangeWrapper &I : Active) + Handled.push_back(I); + Active.clear(); + for (const LiveRangeWrapper &I : Inactive) + Handled.push_back(I); + Inactive.clear(); dump(Func); // Finish up by assigning RegNumTmp->RegNum for each Variable. 
- for (UnorderedRanges::const_iterator I = Handled.begin(), E = Handled.end(); - I != E; ++I) { - LiveRangeWrapper Item = *I; + for (const LiveRangeWrapper &Item : Handled) { int32_t RegNum = Item.Var->getRegNumTmp(); if (Verbose) { if (!Item.Var->hasRegTmp()) { @@ -564,27 +536,23 @@ void LinearScan::dump(Cfg *Func) const { Func->resetCurrentNode(); Str << "**** Current regalloc state:\n"; Str << "++++++ Handled:\n"; - for (UnorderedRanges::const_iterator I = Handled.begin(), E = Handled.end(); - I != E; ++I) { - I->dump(Func); + for (const LiveRangeWrapper &Item : Handled) { + Item.dump(Func); Str << "\n"; } Str << "++++++ Unhandled:\n"; - for (OrderedRanges::const_iterator I = Unhandled.begin(), E = Unhandled.end(); - I != E; ++I) { - I->dump(Func); + for (const LiveRangeWrapper &Item : Unhandled) { + Item.dump(Func); Str << "\n"; } Str << "++++++ Active:\n"; - for (UnorderedRanges::const_iterator I = Active.begin(), E = Active.end(); - I != E; ++I) { - I->dump(Func); + for (const LiveRangeWrapper &Item : Active) { + Item.dump(Func); Str << "\n"; } Str << "++++++ Inactive:\n"; - for (UnorderedRanges::const_iterator I = Inactive.begin(), E = Inactive.end(); - I != E; ++I) { - I->dump(Func); + for (const LiveRangeWrapper &Item : Inactive) { + Item.dump(Func); Str << "\n"; } } diff --git a/src/IceTargetLowering.h b/src/IceTargetLowering.h index 199512482..43e6a012a 100644 --- a/src/IceTargetLowering.h +++ b/src/IceTargetLowering.h @@ -24,6 +24,8 @@ namespace Ice { +typedef uint8_t AsmCodeByte; + class Assembler; // LoweringContext makes it easy to iterate through non-deleted @@ -52,6 +54,9 @@ public: bool atEnd() const { return Cur == End; } InstList::iterator getCur() const { return Cur; } InstList::iterator getEnd() const { return End; } + // Adaptor to enable range-based for loops. + InstList::iterator begin() const { return getCur(); } + InstList::iterator end() const { return getEnd(); } void insert(Inst *Inst); Inst *getLastInserted() const; void advanceCur() { Cur = Next; } @@ -145,7 +150,7 @@ public: virtual SizeT getFrameOrStackReg() const = 0; virtual size_t typeWidthInBytesOnStack(Type Ty) const = 0; virtual SizeT getBundleAlignLog2Bytes() const = 0; - virtual llvm::ArrayRef getNonExecBundlePadding() const = 0; + virtual llvm::ArrayRef getNonExecBundlePadding() const = 0; bool hasComputedFrame() const { return HasComputedFrame; } bool shouldDoNopInsertion() const; // Returns true if this function calls a function that has the diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp index 1a7ae895d..55f113b0c 100644 --- a/src/IceTargetLoweringX8632.cpp +++ b/src/IceTargetLoweringX8632.cpp @@ -555,9 +555,7 @@ void TargetX8632::sortByAlignment(VarList &Dest, const VarList &Source) const { X86_LOG2_OF_MAX_STACK_SLOT_SIZE - X86_LOG2_OF_MIN_STACK_SLOT_SIZE + 1; VarList Buckets[NumBuckets]; - for (VarList::const_iterator I = Source.begin(), E = Source.end(); I != E; - ++I) { - Variable *Var = *I; + for (Variable *Var : Source) { uint32_t NaturalAlignment = typeWidthInBytesOnStack(Var->getType()); SizeT LogNaturalAlignment = llvm::findFirstSet(NaturalAlignment); assert(LogNaturalAlignment >= X86_LOG2_OF_MIN_STACK_SLOT_SIZE); @@ -698,9 +696,7 @@ void TargetX8632::addProlog(CfgNode *Node) { // The entire spill locations area gets aligned to largest natural // alignment of the variables that have a spill slot. 
uint32_t SpillAreaAlignmentBytes = 0; - for (VarList::const_iterator I = Variables.begin(), E = Variables.end(); - I != E; ++I) { - Variable *Var = *I; + for (Variable *Var : Variables) { if (Var->hasReg()) { RegsUsed[Var->getRegNum()] = true; continue; @@ -726,10 +722,7 @@ void TargetX8632::addProlog(CfgNode *Node) { SortedSpilledVariables.reserve(SpilledVariables.size()); sortByAlignment(SortedSpilledVariables, SpilledVariables); - for (VarList::const_iterator I = SortedSpilledVariables.begin(), - E = SortedSpilledVariables.end(); - I != E; ++I) { - Variable *Var = *I; + for (Variable *Var : SortedSpilledVariables) { size_t Increment = typeWidthInBytesOnStack(Var->getType()); if (!SpillAreaAlignmentBytes) SpillAreaAlignmentBytes = Increment; @@ -837,10 +830,7 @@ void TargetX8632::addProlog(CfgNode *Node) { size_t GlobalsSpaceUsed = SpillAreaPaddingBytes; LocalsSize.assign(LocalsSize.size(), 0); size_t NextStackOffset = GlobalsSpaceUsed; - for (VarList::const_iterator I = SortedSpilledVariables.begin(), - E = SortedSpilledVariables.end(); - I != E; ++I) { - Variable *Var = *I; + for (Variable *Var : SortedSpilledVariables) { size_t Increment = typeWidthInBytesOnStack(Var->getType()); if (SimpleCoalescing && VMetadata->isTracked(Var)) { if (VMetadata->isMultiBlock(Var)) { @@ -866,10 +856,7 @@ void TargetX8632::addProlog(CfgNode *Node) { // Assign stack offsets to variables that have been linked to spilled // variables. - for (VarList::const_iterator I = VariablesLinkedToSpillSlots.begin(), - E = VariablesLinkedToSpillSlots.end(); - I != E; ++I) { - Variable *Var = *I; + for (Variable *Var : VariablesLinkedToSpillSlots) { Variable *Linked = (llvm::cast(Var))->getLinkedTo(); Var->setStackOffset(Linked->getStackOffset()); } @@ -904,6 +891,7 @@ void TargetX8632::addProlog(CfgNode *Node) { void TargetX8632::addEpilog(CfgNode *Node) { InstList &Insts = Node->getInsts(); InstList::reverse_iterator RI, E; + // TODO(stichnot): Use llvm::make_range with LLVM 3.5. for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) { if (llvm::isa(*RI)) break; @@ -979,9 +967,8 @@ template void TargetX8632::emitConstantPool() const { Str << "\t.section\t.rodata.cst" << Align << ",\"aM\",@progbits," << Align << "\n"; Str << "\t.align\t" << Align << "\n"; - for (ConstantList::const_iterator I = Pool.begin(), E = Pool.end(); I != E; - ++I) { - typename T::IceType *Const = llvm::cast(*I); + for (Constant *C : Pool) { + typename T::IceType *Const = llvm::cast(C); typename T::PrimitiveFpType Value = Const->getValue(); // Use memcpy() to copy bits from Value into RawValue in a way // that avoids breaking strict-aliasing rules. @@ -4332,9 +4319,7 @@ void TargetX8632::postLower() { // The first pass also keeps track of which instruction is the last // use for each infinite-weight variable. After the last use, the // variable is released to the free list. - for (InstList::iterator I = Context.getCur(), E = Context.getEnd(); I != E; - ++I) { - const Inst *Inst = *I; + for (Inst *Inst : Context) { if (Inst->isDeleted()) continue; // Don't consider a FakeKill instruction, because (currently) it @@ -4366,10 +4351,8 @@ void TargetX8632::postLower() { // The second pass colors infinite-weight variables. 
llvm::SmallBitVector AvailableRegisters = WhiteList; llvm::SmallBitVector FreedRegisters(WhiteList.size()); - for (InstList::iterator I = Context.getCur(), E = Context.getEnd(); I != E; - ++I) { + for (Inst *Inst : Context) { FreedRegisters.reset(); - const Inst *Inst = *I; if (Inst->isDeleted()) continue; // Skip FakeKill instructions like above. diff --git a/src/IceTargetLoweringX8632.h b/src/IceTargetLoweringX8632.h index 37f7fef67..e2291a37e 100644 --- a/src/IceTargetLoweringX8632.h +++ b/src/IceTargetLoweringX8632.h @@ -49,9 +49,9 @@ public: return (typeWidthInBytes(Ty) + 3) & ~3; } SizeT getBundleAlignLog2Bytes() const override { return 5; } - llvm::ArrayRef getNonExecBundlePadding() const override { - static const uint8_t Padding[] = { 0xF4 }; - return llvm::ArrayRef(Padding, 1); + llvm::ArrayRef getNonExecBundlePadding() const override { + static const AsmCodeByte Padding[] = { 0xF4 }; + return llvm::ArrayRef(Padding, 1); } void emitVariable(const Variable *Var) const override; void lowerArguments() override; diff --git a/src/IceTimerTree.cpp b/src/IceTimerTree.cpp index fa0e3219a..847941fa0 100644 --- a/src/IceTimerTree.cpp +++ b/src/IceTimerTree.cpp @@ -101,8 +101,8 @@ typedef std::multimap DumpMapType; // Dump the Map items in reverse order of their time contribution. void dumpHelper(Ostream &Str, const DumpMapType &Map, double TotalTime) { - for (DumpMapType::const_reverse_iterator I = Map.rbegin(), E = Map.rend(); - I != E; ++I) { + // TODO(stichnot): Use llvm::make_range with LLVM 3.5. + for (auto I = Map.rbegin(), E = Map.rend(); I != E; ++I) { char buf[80]; snprintf(buf, llvm::array_lengthof(buf), " %10.6f (%4.1f%%): ", I->first, I->first * 100 / TotalTime); diff --git a/src/IceTranslator.cpp b/src/IceTranslator.cpp index 9cc97a0d3..d11e76d25 100644 --- a/src/IceTranslator.cpp +++ b/src/IceTranslator.cpp @@ -58,19 +58,15 @@ void Translator::nameUnnamedGlobalAddresses(llvm::Module *Mod) { Ostream &errs = Ctx->getStrDump(); if (!GlobalPrefix.empty()) { uint32_t NameIndex = 0; - for (llvm::Module::global_iterator I = Mod->global_begin(), - E = Mod->global_end(); - I != E; ++I) { + for (auto I = Mod->global_begin(), E = Mod->global_end(); I != E; ++I) setValueName(I, "global", GlobalPrefix, NameIndex, errs); - } } const IceString &FunctionPrefix = Flags.DefaultFunctionPrefix; if (FunctionPrefix.empty()) return; uint32_t NameIndex = 0; - for (llvm::Module::iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) { - setValueName(I, "function", FunctionPrefix, NameIndex, errs); - } + for (llvm::Function &I : *Mod) + setValueName(&I, "function", FunctionPrefix, NameIndex, errs); } void Translator::translateFcn(Cfg *Fcn) { @@ -100,9 +96,7 @@ void Translator::emitConstants() { void Translator::convertGlobals(llvm::Module *Mod) { std::unique_ptr GlobalLowering( TargetGlobalInitLowering::createLowering(Ctx->getTargetArch(), Ctx)); - for (llvm::Module::const_global_iterator I = Mod->global_begin(), - E = Mod->global_end(); - I != E; ++I) { + for (auto I = Mod->global_begin(), E = Mod->global_end(); I != E; ++I) { if (!I->hasInitializer()) continue; const llvm::Constant *Initializer = I->getInitializer(); diff --git a/src/IceTypeConverter.h b/src/IceTypeConverter.h index 089c923a3..c06719167 100644 --- a/src/IceTypeConverter.h +++ b/src/IceTypeConverter.h @@ -44,7 +44,7 @@ public: /// Converts LLVM type LLVMTy to an ICE type. Returns /// Ice::IceType_NUM if unable to convert. 
Type convertToIceType(llvm::Type *LLVMTy) const { - std::map::const_iterator Pos = LLVM2IceMap.find(LLVMTy); + auto Pos = LLVM2IceMap.find(LLVMTy); if (Pos == LLVM2IceMap.end()) return convertToIceTypeOther(LLVMTy); return Pos->second; diff --git a/src/PNaClTranslator.cpp b/src/PNaClTranslator.cpp index c160daa6f..0571b297a 100644 --- a/src/PNaClTranslator.cpp +++ b/src/PNaClTranslator.cpp @@ -847,8 +847,7 @@ public: Func->setInternal(LLVMFunc->hasInternalLinkage()); CurrentNode = InstallNextBasicBlock(); Func->setEntryNode(CurrentNode); - for (Function::const_arg_iterator ArgI = LLVMFunc->arg_begin(), - ArgE = LLVMFunc->arg_end(); + for (auto ArgI = LLVMFunc->arg_begin(), ArgE = LLVMFunc->arg_end(); ArgI != ArgE; ++ArgI) { Func->addArg(getNextInstVar(Context->convertToIceType(ArgI->getType()))); } @@ -1387,11 +1386,7 @@ void FunctionParser::ExitBlock() { // Before translating, check for blocks without instructions, and // insert unreachable. This shouldn't happen, but be safe. unsigned Index = 0; - const Ice::NodeList &Nodes = Func->getNodes(); - for (std::vector::const_iterator Iter = Nodes.begin(), - IterEnd = Nodes.end(); - Iter != IterEnd; ++Iter, ++Index) { - Ice::CfgNode *Node = *Iter; + for (Ice::CfgNode *Node : Func->getNodes()) { if (Node->getInsts().size() == 0) { std::string Buffer; raw_string_ostream StrBuf(Buffer); @@ -1400,6 +1395,7 @@ void FunctionParser::ExitBlock() { // TODO(kschimpf) Remove error recovery once implementation complete. Node->appendInst(Ice::InstUnreachable::create(Func)); } + ++Index; } Func->computePredecessors(); // Note: Once any errors have been found, we turn off all -- 2.11.0
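
The sketches below illustrate, outside the diff itself, the iteration idioms this patch adopts. They are minimal stand-alone examples: the class and container names (Node, NodeList, InstList, and so on) are simplified placeholders, not Subzero's actual definitions.

The core rewrite replaces explicit iterator loops over containers of pointers with range-based for loops that bind each element directly:

  // Simplified stand-ins for Subzero's CfgNode/NodeList.
  #include <vector>

  struct Node {
    void computePredecessors() {}
  };
  typedef std::vector<Node *> NodeList;

  // Before: spell out the iterator type and dereference it at each use.
  void computePredecessorsOld(NodeList &Nodes) {
    for (NodeList::iterator I = Nodes.begin(), E = Nodes.end(); I != E; ++I) {
      (*I)->computePredecessors();
    }
  }

  // After: the loop variable is the element (a Node *) itself.
  void computePredecessorsNew(NodeList &Nodes) {
    for (Node *N : Nodes)
      N->computePredecessors();
  }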
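
Reverse-order walks (liveness, epilog detection, timer dumps) keep explicit iterators with 'auto', because a range-based for loop needs an object exposing begin()/end(), and rbegin()/rend() do not provide one directly. The TODOs defer to llvm::make_range once LLVM 3.5 is available; the adaptor below only illustrates the same idea and is not part of Subzero or LLVM:

  #include <list>

  // Wraps a container so that begin()/end() walk it in reverse.
  template <typename C> class ReverseRange {
    C &Container;

  public:
    explicit ReverseRange(C &Container) : Container(Container) {}
    typename C::reverse_iterator begin() { return Container.rbegin(); }
    typename C::reverse_iterator end() { return Container.rend(); }
  };

  template <typename C> ReverseRange<C> reverse_range(C &Container) {
    return ReverseRange<C>(Container);
  }

  void processInReverse(std::list<int> &Insts) {
    // Equivalent to: for (auto I = Insts.rbegin(), E = Insts.rend(); I != E; ++I)
    for (int &I : reverse_range(Insts))
      (void)I; // process the element here
  }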
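
The change to LoweringContext in IceTargetLowering.h adds begin()/end() members that forward to the existing getCur()/getEnd() accessors, which is what lets TargetX8632::postLower() iterate with "for (Inst *Inst : Context)". A reduced sketch of the mechanism (the real class carries more state, such as the insertion cursor):

  #include <list>

  struct Inst {};
  typedef std::list<Inst *> InstList;

  class LoweringContext {
    InstList::iterator Cur, End;

  public:
    explicit LoweringContext(InstList &Insts)
        : Cur(Insts.begin()), End(Insts.end()) {}
    InstList::iterator getCur() const { return Cur; }
    InstList::iterator getEnd() const { return End; }
    // Adaptor to enable range-based for loops over Cur..End.
    InstList::iterator begin() const { return getCur(); }
    InstList::iterator end() const { return getEnd(); }
  };

  int countRemaining(LoweringContext &Context) {
    int N = 0;
    for (Inst *I : Context) { // made possible by the begin()/end() adaptor
      (void)I;
      ++N;
    }
    return N;
  }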
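
In the dump routines, the "print a comma between items" checks of the form "if (I != InEdges.begin())" no longer work once the iterator is gone, so a boolean flag takes their place. The intent of that idiom is to emit the separator before every element except the first, as in this stand-alone sketch (names are placeholders):

  #include <iostream>
  #include <string>
  #include <vector>

  void dumpNames(const std::vector<std::string> &Names) {
    bool First = true;
    for (const std::string &Name : Names) {
      if (!First)
        std::cout << ", ";
      First = false;
      std::cout << "%" << Name;
    }
    std::cout << "\n";
  }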
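
Several lookups (the constant pools, the intrinsics map, the type converter) only shorten the spelled-out const_iterator type to 'auto' around find(); behavior is unchanged. A generic sketch with placeholder types:

  #include <map>
  #include <string>

  int lookupOrDefault(const std::map<std::string, int> &Pool,
                      const std::string &Key) {
    auto Iter = Pool.find(Key); // was: std::map<std::string, int>::const_iterator
    if (Iter == Pool.end())
      return -1;
    return Iter->second;
  }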