//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}
IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
unsigned IRTranslator::getOrCreateVReg(const Value &Val) {
  unsigned &ValReg = ValToVReg[&Val];
  if (ValReg)
    return ValReg;

  // Fill ValRegsSequence with the sequence of registers
  // we need to concat together to produce the value.
  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");
  unsigned VReg =
      MRI->createGenericVirtualRegister(getLLTForType(*Val.getType(), *DL));
  ValReg = VReg;

  if (auto CV = dyn_cast<Constant>(&Val)) {
    bool Success = translate(*CV, VReg);
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
    }
  }

  return VReg;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}
void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}
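
// Example (illustrative, not verbatim MIR): the special case below turns
//   %r = fsub float -0.0, %x
// into a single G_FNEG of %x instead of a G_FSUB from a constant zero.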
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
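
// Illustrative output shape: an IR compare such as
//   %c = icmp eq i32 %a, %b
// becomes a G_ICMP defining an s1-typed vreg, roughly
//   %c(s1) = G_ICMP intpred(eq), %a, %b
// while fcmp with the always-false/always-true predicate is folded into a
// copy of 0 or all-ones below.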
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    // Use U's type: CI is null when U is a constant expression.
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;
  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, !Ret ? 0 : getOrCreateVReg(*Ret));
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}
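
// Illustration of the chain lowering performed below (hand-written, not
// verbatim MIR): for
//   switch i32 %val, label %def [ i32 1, label %bb1 ]
// we emit, per case, a compare-and-branch followed by a fresh fallthrough
// block that holds the next comparison:
//   %tst(s1) = G_ICMP intpred(eq), %case1, %val
//   G_BRCOND %tst, %bb1
//   G_BR %fallthrough
// and the final fallthrough block branches to the default destination.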
bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.
  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  unsigned Res = getOrCreateVReg(LI);
  unsigned Addr = getOrCreateVReg(*LI.getPointerOperand());

  MIRBuilder.buildLoad(
      Res, Addr,
      *MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(LI.getType()),
                                getMemOpAlignment(LI), AAMDNodes(), nullptr,
                                LI.getSyncScopeID(), LI.getOrdering()));
  return true;
}
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  unsigned Val = getOrCreateVReg(*SI.getValueOperand());
  unsigned Addr = getOrCreateVReg(*SI.getPointerOperand());

  MIRBuilder.buildStore(
      Val, Addr,
      *MF->getMachineMemOperand(
          MachinePointerInfo(SI.getPointerOperand()), Flags,
          DL->getTypeStoreSize(SI.getValueOperand()->getType()),
          getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
          SI.getOrdering()));
  return true;
}
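
// The aggregate lowering below works in bit offsets. For example, with a
// typical 64-bit DataLayout,
//   %v = extractvalue { i32, i64 } %agg, 1
// has a byte offset of 8 (the i64 field is 8-byte aligned), so this emits
//   G_EXTRACT <agg vreg>, 64
// with the factor of 8 converting getIndexedOffsetInType's bytes to bits.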
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // If Src is a single element ConstantStruct, translate extractvalue
  // to that element to avoid inserting a cast instruction.
  if (auto CS = dyn_cast<ConstantStruct>(Src))
    if (CS->getNumOperands() == 1) {
      unsigned Res = getOrCreateVReg(*CS->getOperand(0));
      ValToVReg[&U] = Res;
      return true;
    }

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildExtract(Res, getOrCreateVReg(*Src), Offset);

  return true;
}
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());
  SmallVector<Value *, 1> Indices;

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 2; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  uint64_t Offset = 8 * DL->getIndexedOffsetInType(Src->getType(), Indices);

  unsigned Res = getOrCreateVReg(U);
  unsigned Inserted = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildInsert(Res, getOrCreateVReg(*Src), Inserted, Offset);

  return true;
}
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Res = getOrCreateVReg(U);
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  unsigned Op0 = getOrCreateVReg(*U.getOperand(1));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildSelect(Res, Tst, Op0, Op1);
  return true;
}
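
// Note: LLT erases some IR type distinctions, so many bitcasts are no-ops
// here. For instance <4 x i32> and <4 x float> both map to <4 x s32>, in
// which case the source vreg is simply reused (or copied) below rather than
// emitting a G_BITCAST.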
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // Get the source vreg now, to avoid invalidating ValToVReg.
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    unsigned &Reg = ValToVReg[&U];
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (Reg)
      MIRBuilder.buildCopy(Reg, SrcReg);
    else
      Reg = SrcReg;
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}
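
// Sketch of the decomposition done below (illustrative MIR): for
//   %p = getelementptr i32, i32* %base, i64 %idx
// on a 64-bit target, the variable index is scaled and added to the pointer:
//   %size(s64) = G_CONSTANT 4      ; alloc size of i32
//   %off(s64)  = G_MUL %size, %idx
//   %p(p0)     = G_GEP %base, %off
// Constant indices are instead accumulated and folded into a single
// trailing G_GEP.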
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
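
// The mem* intrinsics are lowered to plain libcalls here. For example
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, ...)
// becomes a call to the external symbol "memcpy" with (%d, %s, %n) as
// arguments, provided both pointers are in address space 0 and the size
// type matches the pointer width (checked below).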
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}
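
// Example of the lowering below (illustrative): for
//   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// we emit a G_UADDE with an explicit zero carry-in and then pack the two
// results into the aggregate vreg:
//   %val(s32), %ovf(s1) = G_UADDE %a, %b, %zero
//   %res = sequence of %val at bit 0 and %ovf at bit 32
// The *ADDO/*SUBO/*MULO forms take no carry-in operand.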
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  LLT Ty = getLLTForType(*CI.getOperand(0)->getType(), *DL);
  LLT s1 = LLT::scalar(1);
  unsigned Width = Ty.getSizeInBits();
  unsigned Res = MRI->createGenericVirtualRegister(Ty);
  unsigned Overflow = MRI->createGenericVirtualRegister(s1);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(Res)
                 .addDef(Overflow)
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = getOrCreateVReg(
        *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
    MIB.addUse(Zero);
  }

  MIRBuilder.buildSequence(getOrCreateVReg(CI), {Res, Overflow}, {0, Width});
  return true;
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // optimization levels.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma:
    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    return true;
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}
bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
    SmallVector<unsigned, 8> Args;
    for (auto &Arg : CI.arg_operands())
      Args.push_back(getOrCreateVReg(*Arg));

    MF->getFrameInfo().setHasCalls(true);
    return CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = CI.getType()->isVoidTy() ? 0 : getOrCreateVReg(CI);
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(getOrCreateVReg(*Arg));
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res = I.getType()->isVoidTy() ? 0 : getOrCreateVReg(I);
  SmallVector<unsigned, 8> Args;
  for (auto &Arg : I.arg_operands())
    Args.push_back(getOrCreateVReg(*Arg));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  unsigned VReg = MRI->createGenericVirtualRegister(Tys[0]),
           Tmp = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildCopy(VReg, ExceptionReg);
  MIRBuilder.buildInsert(Tmp, Undef, VReg, 0);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);

  // N.b. the exception selector register always has pointer type and may not
  // match the actual IR-level type in the landingpad so an extra cast is
  // needed.
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);

  VReg = MRI->createGenericVirtualRegister(Tys[1]);
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT).addDef(VReg).addUse(PtrVReg);
  MIRBuilder.buildInsert(getOrCreateVReg(LP), Tmp, VReg,
                         Tys[0].getSizeInBits());

  return true;
}
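
// For a dynamic alloca, the code below open-codes the stack adjustment.
// Roughly, on a 64-bit target (illustrative MIR, register names invented):
//   %sp(p0)   = COPY <stack pointer>
//   %neg(s64) = G_MUL %nelts, %negated_elt_size  ; element size is negated
//   %tmp(p0)  = G_GEP %sp, %neg                  ; bump the stack down
//   %buf(p0)  = G_PTR_MASK %tmp, Log2(Align)     ; realign if needed
//   <stack pointer> = COPY %buf
// Static allocas instead become a G_FRAME_INDEX of a fixed stack object.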
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    ValToVReg[&U] = Elt;
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}
bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    ValToVReg[&U] = Elt;
    return true;
  }
  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Idx = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}
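
// PHIs are translated in two phases: translatePHI only creates the G_PHI
// with its def, e.g. for
//   %x = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
// it emits just `%x(s32) = G_PHI` and queues the node; the incoming
// (vreg, MBB) operand pairs are appended later by finishPendingPhis, once
// every basic block and value has been visited.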
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI);
  MIB.addDef(getOrCreateVReg(PI));

  PendingPHIs.emplace_back(&PI, MIB.getInstr());
  return true;
}
void IRTranslator::finishPendingPhis() {
  for (std::pair<const PHINode *, MachineInstr *> &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    MachineInstrBuilder MIB(*MF, Phi.second);

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      unsigned ValReg = getOrCreateVReg(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(MIB->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        MIB.addUse(ValReg);
        MIB.addMBB(Pred);
      }
    }
  }
}
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder.buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CS = dyn_cast<ConstantStruct>(&C)) {
    // Return the element if it is a single element ConstantStruct.
    if (CS->getNumOperands() == 1) {
      unsigned EltReg = getOrCreateVReg(*CS->getOperand(0));
      EntryBuilder.buildCast(Reg, EltReg);
      return true;
    }
    SmallVector<unsigned, 4> Ops;
    SmallVector<uint64_t, 4> Indices;
    uint64_t Offset = 0;
    for (unsigned i = 0; i < CS->getNumOperands(); ++i) {
      unsigned OpReg = getOrCreateVReg(*CS->getOperand(i));
      Ops.push_back(OpReg);
      Indices.push_back(Offset);
      Offset += MRI->getType(OpReg).getSizeInBits();
    }
    EntryBuilder.buildSequence(Reg, Ops, Indices);
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i)
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    EntryBuilder.buildMerge(Reg, Ops);
  } else
    return false;

  return true;
}
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  ValToVReg.clear();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or
  // not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fallthrough to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(getOrCreateVReg(Arg));
  }
  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // And translate the function!
  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getMBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      if (translate(Inst))
        continue;

      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 Inst.getDebugLoc(), &BB);
      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

      if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
        std::string InstStrStorage;
        raw_string_ostream InstStr(InstStrStorage);
        InstStr << Inst;

        R << ": '" << InstStr.str() << "'";
      }

      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  return false;
}