//===-- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator --*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the IRTranslator class.
///
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"

#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#define DEBUG_TYPE "irtranslator"

using namespace llvm;
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
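
// IRTranslator depends on TargetPassConfig so that reportTranslationError()
// below can ask whether GlobalISel abort mode is enabled and escalate a
// missed-optimization remark into a fatal error when it is.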
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}
IRTranslator::IRTranslator() : MachineFunctionPass(ID), MRI(nullptr) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
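
// Look up the virtual register for an IR Value, creating it on first use.
// Constants are materialized lazily: the first reference to a Constant emits
// its defining instruction through EntryBuilder into the dedicated entry
// block, so the definition dominates every later use.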
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  if (ValReg)
    return ValReg;

  // Fill ValRegsSequence with the sequence of registers
  // we need to concat together to produce the value.
  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");
  unsigned VReg =
      MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
  ValReg = VReg;

  if (auto CV = dyn_cast<Constant>(&Val)) {
    bool Success = translate(*CV, VReg);
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction()->getSubprogram(),
                                 &MF->getFunction()->getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
    }
  }
  return VReg;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}
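
// Compute the alignment of a memory-accessing instruction. An alignment of
// zero in the IR means "use the ABI alignment of the accessed type", which is
// what gets returned in that case.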
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}
void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}
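
// For example, `%d = add i32 %a, %b` is dispatched here (via the HANDLE_INST
// switch in translate(const Instruction &) below) and becomes, schematically,
// `%d(s32) = G_ADD %a(s32), %b(s32)`, with each IR value mapped onto a
// generic virtual register of the corresponding LLT.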
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
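
// Compares become G_ICMP or G_FCMP. A compare may reach us either as a
// CmpInst or as a compare ConstantExpr, hence the dyn_cast below. The
// always-false/always-true FP predicates have no G_FCMP equivalent and are
// folded directly to the constants 0 and 1.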
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildConstant(Res, 0);
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildConstant(Res, 1);
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess with the insertion point, but that is not important
  // here: a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MIRBuilder.buildBr(TgtBB);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}
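
// For example, `br i1 %c, label %t, label %f` becomes, roughly, a
// `G_BRCOND %c, %bb.t` followed by an unconditional `G_BR %bb.f`, and both
// targets are recorded as CFG successors of the current MachineBasicBlock.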
bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went into optimizing switch
  // lowering in there.

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());

  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI), AAMDNodes(), nullptr,
                                LI.getSynchScope(), LI.getOrdering()));
  return true;
}
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSynchScope(),
          SI.getOrdering()));
  return true;
}
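
// Aggregate values are treated as bit sequences: the indices of an
// extractvalue/insertvalue are converted into a bit offset via
// getIndexedOffsetInType(). For example, on targets where i64 is 8-byte
// aligned, `extractvalue {i32, i64} %s, 1` becomes a G_EXTRACT at bit
// offset 64 from the register holding %s.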
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);

  return true;
}
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  const Value &Inserted = *U.getOperand(1);
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), getOrCreateVReg(Inserted),
                         Offset);

  return true;
}
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
                         getOrCreateVReg(*U.getOperand(1)),
                         getOrCreateVReg(*U.getOperand(2)));
  return true;
}
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // Get the source vreg now, to avoid invalidating ValToVReg.
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    unsigned &Reg = ValToVReg[&U];
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (Reg)
      MIRBuilder.buildCopy(Reg, SrcReg);
    else
      Reg = SrcReg;
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}
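
// GEPs are decomposed into pointer arithmetic: struct-field and constant
// array indices are folded into a running byte offset, while each variable
// index becomes a G_MUL by the element size followed by a G_GEP. For example,
// `getelementptr i32, i32* %p, i64 %i` turns into roughly:
//   %cst = G_CONSTANT 4 ; %off = G_MUL %cst, %i ; %q = G_GEP %p, %off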
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  LLT PtrTy = getLLTForType(*Op0.getType(), *DL);
  unsigned PtrSize = DL->getPointerSizeInBits(PtrTy.getAddressSpace());
  LLT OffsetTy = LLT::scalar(PtrSize);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildConstant(OffsetReg, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildConstant(ElementSizeReg, ElementSize);

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
    MIRBuilder.buildConstant(OffsetReg, Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}
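
// The *.with.overflow intrinsics return a {result, i1 overflow} pair. They
// are translated to the corresponding generic opcode plus a G_SEQUENCE that
// packs the two results back into a single register. G_UADDE/G_USUBE also
// consume a carry-in operand, which is seeded with a constant 0 here.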
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = MRI->createGenericVirtualRegister(s1);
    EntryBuilder.buildConstant(Zero, 0);
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), Res, 0, Overflow, Width);
  return true;
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // optimization levels.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getOffset(), DI.getVariable(),
                                       DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getOffset(), DI.getVariable(),
                                    DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      if (DI.getOffset() != 0)
        MIRBuilder.buildIndirectDbgValue(Reg, DI.getOffset(), DI.getVariable(),
                                         DI.getExpression());
      else
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}
bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    MF->getFrameInfo().setHasCalls(true);
    return CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
      MIB.addImm(CI->getSExtValue());
    else
      MIB.addUse(getOrCreateVReg(*Arg));
  }
  return true;
}
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<unsigned, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.push_back(getOrCreateVReg(*Arg));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]),
           Tmp = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildCopy(VReg, ExceptionReg);
  MIRBuilder.buildInsert(Tmp, Undef, VReg, 0);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);

  // N.b. the exception selector register always has pointer type and may not
  // match the actual IR-level type in the landingpad so an extra cast is
  // needed.
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);

  VReg = MRI->createGenericVirtualRegister(Tys[1]);
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT).addDef(VReg).addUse(PtrVReg);
  MIRBuilder.buildInsert(getOrCreateVReg(LP), Tmp, VReg,
                         Tys[0].getSizeInBits());

  return true;
}
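
// Static allocas become G_FRAME_INDEX references to a pre-created stack slot.
// Dynamic allocas are lowered inline: scale the element count by the type
// size, bump the stack pointer by the (re-aligned) amount, and hand the new
// stack pointer value back as the result of the alloca.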
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  LLT IntPtrTy = LLT::scalar(DL->getPointerSizeInBits());
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize = MRI->createGenericVirtualRegister(IntPtrTy);
  MIRBuilder.buildConstant(TySize, -DL->getTypeAllocSize(Ty));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));

  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    ValToVReg[&U] = Elt;
    return true;
  }

  MIRBuilder.buildInsertVectorElement(
      getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
      getOrCreateVReg(*U.getOperand(1)), getOrCreateVReg(*U.getOperand(2)));
  return true;
}
bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    ValToVReg[&U] = Elt;
    return true;
  }
  MIRBuilder.buildExtractVectorElement(getOrCreateVReg(U),
                                       getOrCreateVReg(*U.getOperand(0)),
                                       getOrCreateVReg(*U.getOperand(1)));
  return true;
}
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}
void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(MIB->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        MIB.addUse(ValReg);
        MIB.addMBB(Pred);
      }
    }
  }
}
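
// Dispatch an IR instruction to the matching translate<Opcode> method. The
// HANDLE_INST X-macro from Instruction.def expands into one case per IR
// opcode, so supporting a new instruction only requires defining the
// corresponding translate method.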
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch(Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else
    return false;

  return true;
}
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  MachinePreds.clear();
}
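
// Translate a whole function: set up the per-function state, lower the
// formal arguments and lazily-created constants into a synthetic entry
// block, translate every basic block in IR order, resolve the pending PHIs,
// and finally merge the synthetic entry block into the real IR entry block.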
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB: F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fallthrough to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));
  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               MF->getFunction()->getSubprogram(),
                               &MF->getFunction()->getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }
  // And translate the function!
  for (const BasicBlock &BB: F) {
    MachineBasicBlock &MBB = getMBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst: BB) {
      if (translate(Inst))
        continue;

      std::string InstStrStorage;
      raw_string_ostream InstStr(InstStrStorage);
      InstStr << Inst;

      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 Inst.getDebugLoc(), &BB);
      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst)
        << ": '" << InstStr.str() << "'";
      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  finishPendingPhis();
  // Now that the MachineFrameInfo has been configured, no further changes to
  // the reserved registers are possible.
  MRI->freezeReservedRegs(*MF);

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // newly picked one.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  return false;
}