//===- X86InstructionSelector.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
15 #include "MCTargetDesc/X86BaseInfo.h"
16 #include "X86InstrBuilder.h"
17 #include "X86InstrInfo.h"
18 #include "X86RegisterBankInfo.h"
19 #include "X86RegisterInfo.h"
20 #include "X86Subtarget.h"
21 #include "X86TargetMachine.h"
22 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
25 #include "llvm/CodeGen/GlobalISel/Utils.h"
26 #include "llvm/CodeGen/MachineBasicBlock.h"
27 #include "llvm/CodeGen/MachineConstantPool.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstr.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineMemOperand.h"
32 #include "llvm/CodeGen/MachineOperand.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/InstrTypes.h"
36 #include "llvm/Support/AtomicOrdering.h"
37 #include "llvm/Support/CodeGen.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/LowLevelTypeImpl.h"
41 #include "llvm/Support/MathExtras.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Target/TargetOpcodes.h"
44 #include "llvm/Target/TargetRegisterInfo.h"

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after this is supported by TableGen-erated instruction
  // selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}
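
// Return the subregister index that selects a register of RC's width within
// a wider GPR (e.g. sub_32bit for GR32); GR64 needs no subindex.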
static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set the X86 opcode and constrain DstReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addUse(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(I, MRI, MF);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  }

  return false;
}
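
// Pick the X86 load/store opcode that implements a G_LOAD/G_STORE of the
// given type on the given register bank, taking the available vector
// features (AVX/AVX512/VLX) and the access alignment into account. Falls
// back to returning the generic opcode unchanged when there is no match.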
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
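
// Select a concrete X86 load/store for G_LOAD/G_STORE: pick the opcode with
// getLoadStoreOp and fold the address computation into the instruction's
// X86AddressMode operands.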
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86 store instruction (Addr, VAL).
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to compute the frame index or GEP address.
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
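
// Materialize the address of a global with LEA. RIP-relative addressing is
// used when the subtarget requires it; TLS and PIC-style references are not
// handled yet.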
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
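
// Select a MOVri of the appropriate width for G_CONSTANT on the GPR bank.
// 64-bit values that fit in a signed 32-bit immediate use MOV64ri32.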
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: if isUInt<32>(Val), X86::MOV32ri can be used.
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
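
// G_TRUNC between GPR classes is implemented as a (sub)register COPY; no new
// instruction is emitted beyond constraining the register classes.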
bool X86InstructionSelector::selectTrunc(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    DEBUG(dbgs() << "G_TRUNC input/output on different banks\n");
    return false;
  }

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  if (!DstRC)
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
  if (!SrcRC)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
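
// Zero-extension from i1: widen the value into a register of the destination
// width with SUBREG_TO_REG when needed, then mask off everything but the low
// bit with an AND.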
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
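
// G_ANYEXT on the GPR bank: a plain COPY when source and destination share a
// register class, otherwise SUBREG_TO_REG with undefined upper bits.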
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addUse(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
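
// Lower G_ICMP to a CMPrr that sets EFLAGS, followed by a SETcc into the
// 8-bit destination register.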
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
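
// G_UADDE is selected to ADC32rr when the carry-in is produced by a previous
// G_UADDE (routed through EFLAGS), or to ADD32rr when the carry-in is the
// constant 0. Only 32-bit operands are handled.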
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the CarryIn def instruction.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // Carry was set by the previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // Carry is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
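
// G_EXTRACT of a subvector: extracting from offset 0 becomes a subregister
// copy; other offsets use VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x4 depending
// on the subtarget and the vector widths involved.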
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // Meanwhile, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace with a subreg-extract copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types.
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}
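
// G_INSERT of a subvector: inserting at offset 0 over an IMPLICIT_DEF becomes
// a subregister copy; otherwise VINSERTF128/VINSERTF32x4/VINSERTF64x4 is used
// depending on the subtarget and the vector widths involved.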
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // Meanwhile, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace with a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split into extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
         "unexpected instruction");

  // Split into inserts.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use an insert-subreg copy.
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}
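
// G_BRCOND: test the low bit of the condition register with TEST8ri and
// branch with JNE_1.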
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
      .addMBB(DestMBB);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
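
// Materialize a G_FCONSTANT by loading it from the constant pool, using the
// addressing form appropriate for the code model.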
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  unsigned Align = DstTy.getSizeInBits();
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code model, GV (and friends) are 64 bits, so
    // they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and for X86-64 in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAG ISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
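
// G_IMPLICIT_DEF and G_PHI only need a register class on the destination;
// they are rewritten directly to IMPLICIT_DEF / PHI.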
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}