//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the IRTranslator class.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
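
// Report a translation failure: mark the function as having failed
// instruction selection, emit an optimization remark, and abort compilation
// entirely if GlobalISel is configured to abort on failure.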
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
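
// Look up (or lazily create) the virtual register holding Val. Constants are
// materialized via EntryBuilder into the entry block, so one vreg serves
// every later use of the same constant.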
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  if (ValReg)
    return ValReg;

  // Fill ValRegsSequence with the sequence of registers
  // we need to concat together to produce the value.
  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");
  unsigned VReg =
      MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
  ValReg = VReg;

  if (auto CV = dyn_cast<Constant>(&Val)) {
    bool Success = translate(*CV, VReg);
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction()->getSubprogram(),
                                 &MF->getFunction()->getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
    }
  }

  return VReg;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (Alignment == 0)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  // An explicit alignment of 0 means the ABI alignment of the value's type.
  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
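
// Compares arrive either as CmpInst instructions or as compare constant
// expressions. The always-false/always-true fcmp predicates have no useful
// G_FCMP form, so they are folded to all-zeros/all-ones constants up front.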
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  // The target may mess with the insertion point, but that is fine here:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.
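  //
  // Roughly, for
  //   switch (%val) { case C0: ...; case C1: ...; default: ... }
  // this emits one comparison block per case:
  //   %t0 = G_ICMP eq %c0, %val ; G_BRCOND %t0, %case0.bb ; G_BR %cmp1.bb
  //   %t1 = G_ICMP eq %c1, %val ; G_BRCOND %t1, %case1.bb ; G_BR %default.bb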

  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }

  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());

  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI), AAMDNodes(), nullptr,
                                LI.getSyncScopeID(), LI.getOrdering()));
  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
          SI.getOrdering()));
  return true;
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // If Src is a single element ConstantStruct, translate extractvalue
  // to that element to avoid inserting a cast instruction.
  if (auto CS = dyn_cast<ConstantStruct>(Src))
    if (CS->getNumOperands() == 1) {
      unsigned Res = getOrCreateVReg(*CS->getOperand(0));
      ValToVReg[&U] = Res;
      return true;
    }

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }
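
  // Note: getIndexedOffsetInType returns the offset in bytes, while
  // G_EXTRACT expects the offset in bits; hence the scaling by 8 below.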
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  // As with extractvalue, the byte offset is scaled to the bit offset that
  // G_INSERT expects.
  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  unsigned Inserted = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset);

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Res = getOrCreateVReg(U);
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  unsigned Op0 = getOrCreateVReg(*U.getOperand(1));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildSelect(Res, Tst, Op0, Op1);
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // Get the source vreg now, to avoid invalidating ValToVReg.
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    unsigned &Reg = ValToVReg[&U];
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (Reg)
      MIRBuilder.buildCopy(Reg, SrcReg);
    else
      Reg = SrcReg;
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}

bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
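
  // Accumulate adjacent constant indices into a single running Offset and
  // only emit a G_GEP when a variable index forces one; a GEP whose indices
  // are all constant thus becomes a single pointer addition at the end.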
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      // N = N + Idx * ElementSize;
      unsigned ElementSizeReg =
          getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      unsigned OffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
      MIRBuilder.buildMul(OffsetReg, ElementSizeReg, IdxReg);

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
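
// Lower memcpy/memmove/memset to a libcall. Only the common form is handled:
// all pointers in address space 0 and a pointer-sized length argument.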
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}

void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment());
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));
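
  // G_UADDE/G_USUBE also consume a carry-in operand. The *.with.overflow
  // intrinsics have no carry-in, so feed a constant false into it.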
  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = getOrCreateVReg(
        *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), {Res, Overflow}, {0, Width});
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts though.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getOffset(), DI.getVariable(),
                                       DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getOffset(), DI.getVariable(),
                                    DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      if (DI.getOffset() != 0)
        MIRBuilder.buildIndirectDbgValue(Reg, DI.getOffset(), DI.getVariable(),
                                         DI.getExpression());
      else
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma:
    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    return true;
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);
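
  // Plain calls (direct or indirect) go straight through CallLowering; the
  // rest of this function handles intrinsics.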
  if (!F || !F->isIntrinsic()) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    MF->getFrameInfo().setHasCalls(true);
    return CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  Intrinsic::ID ID = F->getIntrinsicID();
  if (TII && ID == Intrinsic::not_intrinsic)
    ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(getOrCreateVReg(*Arg));
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, ID)) {
    MachineMemOperand::Flags Flags =
        Info.vol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
    Flags |=
        Info.readMem ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
    uint64_t Size = Info.memVT.getSizeInBits() >> 3;
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Flags, Size, Info.align));
  }

  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(getOrCreateVReg(*Arg));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));
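
  // The landingpad's result is assembled piecewise: start from an undef value
  // of the aggregate type and G_INSERT the exception pointer and the (cast)
  // selector into it.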
  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]),
           Tmp = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildCopy(VReg, ExceptionReg);
  MIRBuilder.buildInsert(Tmp, Undef, VReg, 0);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);

  // N.b. the exception selector register always has pointer type and may not
  // match the actual IR-level type in the landingpad, so an extra cast is
  // needed.
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);

  VReg = MRI->createGenericVirtualRegister(Tys[1]);
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT).addDef(VReg).addUse(PtrVReg);
  MIRBuilder.buildInsert(getOrCreateVReg(LP), Tmp, VReg,
                         Tys[0].getSizeInBits());

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }
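
  // Multiply the element count by the negated alloc size: stepping the stack
  // pointer by a negative amount is how the allocation is carved out of a
  // downward-growing stack.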
  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway, but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));

  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    ValToVReg[&U] = Elt;
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    ValToVReg[&U] = Elt;
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Idx = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}

void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;
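
    // A PHI can mention the same IR predecessor several times, but the
    // machine PHI wants exactly one operand pair per machine predecessor
    // edge, so dedup the IR predecessors first.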
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(MIB->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        MIB.addUse(ValReg);
        MIB.addMBB(Pred);
      }
    }
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}

bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
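    // Constant expressions reuse the instruction translators: Instruction.def
    // expands to one case per opcode, dispatching to the same
    // translate##OPCODE hooks used for real instructions.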
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
      case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CS = dyn_cast<ConstantStruct>(&C)) {
    // Return the element if it is a single element ConstantStruct.
    if (CS->getNumOperands() == 1) {
      unsigned EltReg = getOrCreateVReg(*CS->getOperand(0));
      EntryBuilder.buildCast(Reg, EltReg);
      return true;
    }
    SmallVector<unsigned, 4> Ops;
    SmallVector<uint64_t, 4> Indices;
    uint64_t Offset = 0;
    for (unsigned i = 0; i < CS->getNumOperands(); ++i) {
      unsigned OpReg = getOrCreateVReg(*CS->getOperand(i));
      Ops.push_back(OpReg);
      Indices.push_back(Offset);
      Offset += MRI->getType(OpReg).getSizeInBits();
    }
    EntryBuilder.buildSequence(Reg, Ops, Indices);
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else
    return false;

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = *MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);
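
  // Constants are emitted into this block regardless of where they are first
  // used, so their vregs dominate every use; the block is merged back into
  // the IR entry block at the end of this function.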

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args())
    VRegArgs.push_back(getOrCreateVReg(Arg));
  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               MF->getFunction()->getSubprogram(),
                               &MF->getFunction()->getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }
1291 for (const BasicBlock &BB: F) {
1292 MachineBasicBlock &MBB = getMBB(BB);
1293 // Set the insertion point of all the following translations to
1294 // the end of this basic block.
1295 CurBuilder.setMBB(MBB);
1297 for (const Instruction &Inst: BB) {
1298 if (translate(Inst))
1301 std::string InstStrStorage;
1302 raw_string_ostream InstStr(InstStrStorage);
1305 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1306 Inst.getDebugLoc(), &BB);
1307 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst)
1308 << ": '" << InstStr.str() << "'";
1309 reportTranslationError(*MF, *TPC, *ORE, R);
1314 finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  return false;
}