//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}
IRTranslator::IRTranslator() : MachineFunctionPass(ID) {
  initializeIRTranslatorPass(*PassRegistry::getPassRegistry());
}
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
static void computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets = nullptr,
                             uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
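
// Illustrative sketch (not part of the pass; exact numbers depend on the
// DataLayout): on a typical 64-bit target, an IR value of type
// {i32, [2 x i8*]} would be decomposed by computeValueLLTs into
//   ValueTys = { s32, p0, p0 }
//   Offsets  = { 0, 64, 128 }    (bit offsets, assuming 8-byte-aligned
//                                 pointers after the leading i32)
// so one IR aggregate value maps to several scalar/pointer vregs.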
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}
ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      std::copy(EltRegs.begin(), EltRegs.end(), std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}
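
// For example (a sketch; the numbers depend on the DataLayout): for
//   %buf = alloca i32, i32 4
// ElementSize is 4 and the array size is 4, so a 16-byte stack object is
// created, aligned to the i32 ABI alignment unless the alloca requests more.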
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}
void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // FIXME: handle signed/unsigned wrapping flags.

  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op0).addUse(Op1);
  return true;
}
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
        .addDef(getOrCreateVReg(U))
        .addUse(getOrCreateVReg(*U.getOperand(1)));
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}
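
// For example (schematic MIR; register names are invented for illustration):
//   IR:  %d = fsub float -0.000000e+00, %x
//   MIR: %d:_(s32) = G_FNEG %x
// whereas a general fsub simply becomes %d:_(s32) = G_FSUB %a, %b.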
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
  unsigned Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1);

  return true;
}
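
// For example (a sketch): "icmp eq i32 %a, %b" becomes roughly
//   %c:_(s1) = G_ICMP intpred(eq), %a(s32), %b(s32)
// while the always-false/always-true FP predicates fold to a constant copy
// without emitting any G_FCMP at all.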
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;
  // The target may mess up the insertion point, but that is not important
  // since a return is the last instruction of the block anyway.

  // FIXME: this interface should simplify when CallLowering gets adapted to
  // multiple VRegs per Value.
  unsigned VReg = Ret ? packRegs(*Ret, MIRBuilder) : 0;
  return CLI->lowerReturn(MIRBuilder, Ret, VReg);
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}
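
// For example (schematic; block names invented): "br i1 %c, label %t, label %f"
// becomes
//   G_BRCOND %c(s1), %bb.t
//   G_BR %bb.f            ; omitted when %bb.f is the layout successor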
bool IRTranslator::translateSwitch(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // For now, just translate as a chain of conditional branches.
  // FIXME: could we share most of the logic/code in
  // SelectionDAGBuilder::visitSwitch between SelectionDAG and GlobalISel?
  // At first sight, it seems most of the logic in there is independent of
  // SelectionDAG-specifics and a lot of work went in to optimize switch
  // lowering in there.
  const SwitchInst &SwInst = cast<SwitchInst>(U);
  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
  const BasicBlock *OrigBB = SwInst.getParent();

  LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
  for (auto &CaseIt : SwInst.cases()) {
    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
    MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
    const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
    MachineBasicBlock &TrueMBB = getMBB(*TrueBB);

    MIRBuilder.buildBrCond(Tst, TrueMBB);
    CurMBB.addSuccessor(&TrueMBB);
    addMachineCFGPred({OrigBB, TrueBB}, &CurMBB);

    MachineBasicBlock *FalseMBB =
        MF->CreateMachineBasicBlock(SwInst.getParent());
    // Insert the comparison blocks one after the other.
    MF->insert(std::next(CurMBB.getIterator()), FalseMBB);
    MIRBuilder.buildBr(*FalseMBB);
    CurMBB.addSuccessor(FalseMBB);

    MIRBuilder.setMBB(*FalseMBB);
  }
  // Handle the default case.
  const BasicBlock *DefaultBB = SwInst.getDefaultDest();
  MachineBasicBlock &DefaultMBB = getMBB(*DefaultBB);
  MIRBuilder.buildBr(DefaultMBB);
  MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
  CurMBB.addSuccessor(&DefaultMBB);
  addMachineCFGPred({OrigBB, DefaultBB}, &CurMBB);

  return true;
}
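
// Illustrative expansion (a sketch): a two-case switch on %v becomes a chain
//   %t1:_(s1) = G_ICMP intpred(eq), %c1, %v ; G_BRCOND %t1, %bb.case1
//   %t2:_(s1) = G_ICMP intpred(eq), %c2, %v ; G_BRCOND %t2, %bb.case2
//   G_BR %bb.default
// with each comparison placed in its own fallthrough block.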
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : BrInst.successors())
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());

  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
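
// For example (a sketch, 64-bit pointers assumed): loading an {i32, i32}
// splits into two G_LOADs whose addresses are materialized with
// materializeGEP off the base pointer at byte offsets 0 and 4, each carrying
// its own narrowed MachineMemOperand and MinAlign-adjusted alignment.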
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());

  for (unsigned i = 0; i < Vals.size(); ++i) {
    unsigned Addr = 0;
    MIRBuilder.materializeGEP(Addr, Base, LLT::scalar(64), Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
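
// For example (a sketch, typical layout assumed): for
//   extractvalue {i32, {i64, i64}} %agg, 1, 0
// the synthesized GEP-style index list is {0, 1, 0}; the inner struct starts
// at byte offset 8, so the helper returns 8 * 8 = 64 bits.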
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                 Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}
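
// For example (a sketch): for "insertvalue {i32, i32} %agg, i32 %v, 1" the
// destination's vreg list reuses %agg's vreg for the first element and %v's
// vreg for the second; no instructions are emitted, it is pure vreg
// forwarding. The same holds for extractvalue above.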
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
  ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  for (unsigned i = 0; i < ResRegs.size(); ++i)
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i]);

  return true;
}
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  unsigned Op = getOrCreateVReg(*U.getOperand(0));
  unsigned Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode).addDef(Res).addUse(Op);
  return true;
}
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  unsigned BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        unsigned OffsetReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetReg);

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      unsigned IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      unsigned GepOffsetReg;
      if (ElementSize != 1) {
        unsigned ElementSizeReg =
            getOrCreateVReg(*ConstantInt::get(OffsetIRTy, ElementSize));

        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeReg, IdxReg);
      } else
        GepOffsetReg = IdxReg;

      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    unsigned OffsetReg = getOrCreateVReg(*ConstantInt::get(OffsetIRTy, Offset));
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetReg);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
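
// For example (schematic; vreg names invented): for
//   %p = getelementptr i32, i32* %base, i64 %idx
// the variable index is lowered roughly to
//   %off:_(s64) = G_MUL %four, %idx    ; %four holds the constant 4
//   %p:_(p0)    = G_GEP %base, %off
// while any constant-folded part of the offset is applied as a single
// trailing G_GEP against a constant-backed vreg.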
bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {
  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo(0, CI.getType()), Args);
}
void IRTranslator::getStackGuard(unsigned DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  *MemRefs =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs(MemRefs, MemRefs + 1);
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
  auto MIB = MIRBuilder.buildInstr(Op)
                 .addDef(ResRegs[0])
                 .addDef(ResRegs[1])
                 .addUse(getOrCreateVReg(*CI.getOperand(0)))
                 .addUse(getOrCreateVReg(*CI.getOperand(1)));

  if (Op == TargetOpcode::G_UADDE || Op == TargetOpcode::G_USUBE) {
    unsigned Zero = getOrCreateVReg(
        *Constant::getNullValue(Type::getInt1Ty(CI.getContext())));
    MIB.addUse(Zero);
  }

  return true;
}
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    // Stack coloring is not enabled in O0 (which we care about now) so we can
    // drop these. Make sure someone notices when we start compiling at higher
    // opts.
    if (MF->getTarget().getOptLevel() != CodeGenOpt::None)
      return false;
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else
      MIRBuilder.buildDirectDbgValue(getOrCreateVReg(*Address),
                                     DI.getVariable(), DI.getExpression());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 0));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      unsigned Reg = getOrCreateVReg(*V);
      // FIXME: This does not handle register-indirect values at offset 0. The
      // direct/indirect thing shouldn't really be handled by something as
      // implicit as reg+noreg vs reg+imm in the first place, but it seems
      // pretty baked in right now.
      MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDE, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBE, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
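  // For example (a sketch): "@llvm.sadd.with.overflow.i32(%a, %b)" becomes
  //   %val:_(s32), %obit:_(s1) = G_SADDO %a, %b
  // while the unsigned add/sub variants use G_UADDE/G_USUBE, with an explicit
  // zero carry-in appended by translateOverflowIntrinsic above.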
  case Intrinsic::pow:
    MIRBuilder.buildInstr(TargetOpcode::G_FPOW)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  case Intrinsic::exp:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::exp2:
    MIRBuilder.buildInstr(TargetOpcode::G_FEXP2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::log2:
    MIRBuilder.buildInstr(TargetOpcode::G_FLOG2)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fabs:
    MIRBuilder.buildInstr(TargetOpcode::G_FABS)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fma:
    MIRBuilder.buildInstr(TargetOpcode::G_FMA)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)))
        .addUse(getOrCreateVReg(*CI.getArgOperand(2)));
    return true;
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    unsigned Dst = getOrCreateVReg(CI);
    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, Dst, Op0, Op1, Op2);
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, Ty, Op0, Op1);
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, Dst, FMul, Op2);
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemfunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    unsigned Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*MF,
                                              getOrCreateFrameIndex(*Slot)),
            MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  }
  return false;
}
bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}
unsigned IRTranslator::packRegs(const Value &V,
                                MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
  LLT BigTy = getLLTForType(*V.getType(), *DL);

  if (Regs.size() == 1)
    return Regs[0];

  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < Regs.size(); ++i) {
    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
    MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
    Dst = NewDst;
  }
  return Dst;
}
void IRTranslator::unpackRegs(const Value &V, unsigned Src,
                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);

  for (unsigned i = 0; i < Regs.size(); ++i)
    MIRBuilder.buildExtract(Regs[i], Src, Offsets[i]);
}
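
// Illustrative round trip (a sketch; vreg names invented): for a split value
// of type {i32, i32}, whose "big" LLT is s64, packRegs produces roughly
//   %u:_(s64)  = G_IMPLICIT_DEF
//   %p0:_(s64) = G_INSERT %u, %lo(s32), 0
//   %p1:_(s64) = G_INSERT %p0, %hi(s32), 32
// and unpackRegs mirrors it with G_EXTRACTs at the same bit offsets.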
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  bool IsSplitType = valueIsSplit(CI);
  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
    unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
                                     getLLTForType(*CI.getType(), *DL))
                               : getOrCreateVReg(CI);

    SmallVector<unsigned, 8> Args;
    for (auto &Arg: CI.arg_operands())
      Args.push_back(packRegs(*Arg, MIRBuilder));

    MF->getFrameInfo().setHasCalls(true);
    bool Success = CLI->lowerCall(MIRBuilder, &CI, Res, Args, [&]() {
      return getOrCreateVReg(*CI.getCalledValue());
    });

    if (IsSplitType)
      unpackRegs(CI, Res, MIRBuilder);
    return Success;
  }

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  unsigned Res = 0;
  if (!CI.getType()->isVoidTy()) {
    if (IsSplitType)
      Res =
          MRI->createGenericVirtualRegister(getLLTForType(*CI.getType(), *DL));
    else
      Res = getOrCreateVReg(CI);
  }
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, Res, !CI.doesNotAccessMemory());

  for (auto &Arg : CI.arg_operands()) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg))
      return false;
    MIB.addUse(packRegs(*Arg, MIRBuilder));
  }

  if (IsSplitType)
    unpackRegs(CI, Res, MIRBuilder);

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, Size, Info.align));
  }

  return true;
}
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  unsigned Res =
      MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
  SmallVector<unsigned, 8> Args;
  for (auto &Arg: I.arg_operands())
    Args.push_back(packRegs(*Arg, MIRBuilder));

  if (!CLI->lowerCall(MIRBuilder, &I, Res, Args,
                      [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
    return false;

  unpackRegs(I, Res, MIRBuilder);

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  addLandingPadInfo(LP, MBB);

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of the
  // exception pointer or selector value from token-type landingpads is not
  // currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isStaticAlloca()) {
    unsigned Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  unsigned TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  LLT PtrTy = getLLTForType(*AI.getType(), *DL);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();

  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildCopy(SPTmp, SPReg);

  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

  // Handle alignment. We have to realign if the allocation granule was smaller
  // than stack alignment, or the specific alloca requires more than stack
  // alignment.
  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  Align = std::max(Align, StackAlign);
  if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
    // Round the size of the allocation up to the stack alignment size
    // by adding SA-1 to the size. This doesn't overflow because we're
    // computing an address inside an alloca.
    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
    AllocTmp = AlignedAlloc;
  }

  MIRBuilder.buildCopy(SPReg, AllocTmp);
  MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}
bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  unsigned Res = getOrCreateVReg(U);
  unsigned Val = getOrCreateVReg(*U.getOperand(0));
  unsigned Idx = getOrCreateVReg(*U.getOperand(1));
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addUse(getOrCreateVReg(*U.getOperand(2)));
  return true;
}
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, Reg);
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}
void IRTranslator::finishPendingPhis() {
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;

    // All MachineBasicBlocks exist, add them to the PHI. We assume IRTranslator
    // won't create extra control flow here, otherwise we need to find the
    // dominating predecessor here (or perhaps force the weirder IRTranslators
    // to provide a simple boundary).
    SmallSet<const BasicBlock *, 4> HandledPreds;

    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      if (HandledPreds.count(IRPred))
        continue;

      HandledPreds.insert(IRPred);
      ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) &&
               "incorrect CFG at MachineBasicBlock level");
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}
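
// Illustrative timeline (a sketch): translatePHI first emits placeholder
//   %r:_(s32) = G_PHI
// instructions with no operands, and only after every block has been
// translated does this function append the (value vreg, predecessor MBB)
// operand pairs, once per machine predecessor of each incoming edge.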
bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder.setDebugLoc(Inst.getDebugLoc());
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return translate##OPCODE(Inst, CurBuilder);
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
bool IRTranslator::translate(const Constant &C, unsigned Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder.buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder.buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder.buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // As we are trying to build a constant val of 0 into a pointer,
    // insert a cast to make them correct with respect to types.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder.buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder.buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    std::vector<unsigned> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return translate##OPCODE(*CE, EntryBuilder);
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<unsigned, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder.buildMerge(Reg, Ops);
  } else
    return false;

  return true;
}
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
  EntryBuilder = MachineIRBuilder();
  CurBuilder = MachineIRBuilder();
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder.setMF(*MF);
  EntryBuilder.setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  TPC = &getAnalysis<TargetPassConfig>();
  ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder.setMBB(*EntryBB);

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB: F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();
  }

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<unsigned, 8> VRegArgs;
  for (const Argument &Arg: F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    VRegArgs.push_back(
        MRI->createGenericVirtualRegister(getLLTForType(*Arg.getType(), *DL)));
  }

  if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  auto ArgIt = F.arg_begin();
  for (auto &VArg : VRegArgs) {
    // If the argument is an unsplit scalar then don't use unpackRegs to avoid
    // creating redundant copies.
    if (!valueIsSplit(*ArgIt, VMap.getOffsets(*ArgIt))) {
      auto &VRegs = *VMap.getVRegs(cast<Value>(*ArgIt));
      assert(VRegs.empty() && "VRegs already populated?");
      VRegs.push_back(VArg);
    } else {
      unpackRegs(*ArgIt, VArg, EntryBuilder);
    }
    ArgIt++;
  }

  // And translate the function!
  for (const BasicBlock &BB : F) {
    MachineBasicBlock &MBB = getMBB(BB);
    // Set the insertion point of all the following translations to
    // the end of this basic block.
    CurBuilder.setMBB(MBB);

    for (const Instruction &Inst : BB) {
      if (translate(Inst))
        continue;

      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 Inst.getDebugLoc(), &BB);
      R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

      if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
        std::string InstStrStorage;
        raw_string_ostream InstStr(InstStrStorage);
        InstStr << Inst;

        R << ": '" << InstStr.str() << "'";
      }

      reportTranslationError(*MF, *TPC, *ORE, R);
      return false;
    }
  }

  finishPendingPhis();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  return false;
}