1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
12 //===----------------------------------------------------------------------===//
14 #include "SelectionDAGBuilder.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/Optional.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/StringRef.h"
28 #include "llvm/ADT/Triple.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/Analysis/AliasAnalysis.h"
31 #include "llvm/Analysis/BranchProbabilityInfo.h"
32 #include "llvm/Analysis/ConstantFolding.h"
33 #include "llvm/Analysis/EHPersonalities.h"
34 #include "llvm/Analysis/Loads.h"
35 #include "llvm/Analysis/MemoryLocation.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/Analysis/VectorUtils.h"
39 #include "llvm/CodeGen/Analysis.h"
40 #include "llvm/CodeGen/FunctionLoweringInfo.h"
41 #include "llvm/CodeGen/GCMetadata.h"
42 #include "llvm/CodeGen/ISDOpcodes.h"
43 #include "llvm/CodeGen/MachineBasicBlock.h"
44 #include "llvm/CodeGen/MachineFrameInfo.h"
45 #include "llvm/CodeGen/MachineFunction.h"
46 #include "llvm/CodeGen/MachineInstr.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineJumpTableInfo.h"
49 #include "llvm/CodeGen/MachineMemOperand.h"
50 #include "llvm/CodeGen/MachineModuleInfo.h"
51 #include "llvm/CodeGen/MachineOperand.h"
52 #include "llvm/CodeGen/MachineRegisterInfo.h"
53 #include "llvm/CodeGen/RuntimeLibcalls.h"
54 #include "llvm/CodeGen/SelectionDAG.h"
55 #include "llvm/CodeGen/SelectionDAGNodes.h"
56 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
57 #include "llvm/CodeGen/StackMaps.h"
58 #include "llvm/CodeGen/TargetFrameLowering.h"
59 #include "llvm/CodeGen/TargetInstrInfo.h"
60 #include "llvm/CodeGen/TargetLowering.h"
61 #include "llvm/CodeGen/TargetOpcodes.h"
62 #include "llvm/CodeGen/TargetRegisterInfo.h"
63 #include "llvm/CodeGen/TargetSubtargetInfo.h"
64 #include "llvm/CodeGen/ValueTypes.h"
65 #include "llvm/CodeGen/WinEHFuncInfo.h"
66 #include "llvm/IR/Argument.h"
67 #include "llvm/IR/Attributes.h"
68 #include "llvm/IR/BasicBlock.h"
69 #include "llvm/IR/CFG.h"
70 #include "llvm/IR/CallSite.h"
71 #include "llvm/IR/CallingConv.h"
72 #include "llvm/IR/Constant.h"
73 #include "llvm/IR/ConstantRange.h"
74 #include "llvm/IR/Constants.h"
75 #include "llvm/IR/DataLayout.h"
76 #include "llvm/IR/DebugInfoMetadata.h"
77 #include "llvm/IR/DebugLoc.h"
78 #include "llvm/IR/DerivedTypes.h"
79 #include "llvm/IR/Function.h"
80 #include "llvm/IR/GetElementPtrTypeIterator.h"
81 #include "llvm/IR/InlineAsm.h"
82 #include "llvm/IR/InstrTypes.h"
83 #include "llvm/IR/Instruction.h"
84 #include "llvm/IR/Instructions.h"
85 #include "llvm/IR/IntrinsicInst.h"
86 #include "llvm/IR/Intrinsics.h"
87 #include "llvm/IR/LLVMContext.h"
88 #include "llvm/IR/Metadata.h"
89 #include "llvm/IR/Module.h"
90 #include "llvm/IR/Operator.h"
91 #include "llvm/IR/PatternMatch.h"
92 #include "llvm/IR/Statepoint.h"
93 #include "llvm/IR/Type.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/MC/MCContext.h"
97 #include "llvm/MC/MCSymbol.h"
98 #include "llvm/Support/AtomicOrdering.h"
99 #include "llvm/Support/BranchProbability.h"
100 #include "llvm/Support/Casting.h"
101 #include "llvm/Support/CodeGen.h"
102 #include "llvm/Support/CommandLine.h"
103 #include "llvm/Support/Compiler.h"
104 #include "llvm/Support/Debug.h"
105 #include "llvm/Support/ErrorHandling.h"
106 #include "llvm/Support/MachineValueType.h"
107 #include "llvm/Support/MathExtras.h"
108 #include "llvm/Support/raw_ostream.h"
109 #include "llvm/Target/TargetIntrinsicInfo.h"
110 #include "llvm/Target/TargetMachine.h"
111 #include "llvm/Target/TargetOptions.h"
124 using namespace llvm;
125 using namespace PatternMatch;
127 #define DEBUG_TYPE "isel"
129 /// LimitFloatPrecision - Generate low-precision inline sequences for
130 /// some float libcalls (6, 8 or 12 bits).
131 static unsigned LimitFloatPrecision;
133 static cl::opt<unsigned, true>
134 LimitFPPrecision("limit-float-precision",
135 cl::desc("Generate low-precision inline sequences "
136 "for some float libcalls"),
137 cl::location(LimitFloatPrecision), cl::Hidden,
140 static cl::opt<unsigned> SwitchPeelThreshold(
141 "switch-peel-threshold", cl::Hidden, cl::init(66),
142 cl::desc("Set the case probability threshold for peeling the case from a "
143 "switch statement. A value greater than 100 will void this "
146 // Limit the width of DAG chains. This is important in general to prevent
147 // DAG-based analysis from blowing up. For example, alias analysis and
148 // load clustering may not complete in reasonable time. It is difficult to
149 // recognize and avoid this situation within each individual analysis, and
150 // future analyses are likely to have the same behavior. Limiting DAG width is
151 // the safe approach and will be especially important with global DAGs.
153 // MaxParallelChains default is arbitrarily high to avoid affecting
154 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
155 // sequence over this should have been converted to llvm.memcpy by the
156 // frontend. It is easy to induce this behavior with .ll code such as:
157 // %buffer = alloca [4096 x i8]
158 // %data = load [4096 x i8]* %argPtr
159 // store [4096 x i8] %data, [4096 x i8]* %buffer
160 static const unsigned MaxParallelChains = 64;
162 // Return the calling convention if the Value passed requires ABI mangling as it
163 // is a parameter to a function or a return value from a function which is not
165 static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
166 if (auto *R = dyn_cast<ReturnInst>(V))
167 return R->getParent()->getParent()->getCallingConv();
169 if (auto *CI = dyn_cast<CallInst>(V)) {
170 const bool IsInlineAsm = CI->isInlineAsm();
171 const bool IsIndirectFunctionCall =
172 !IsInlineAsm && !CI->getCalledFunction();
174 // It is possible that the call instruction is an inline asm statement or an
175 // indirect function call in which case the return value of
176 // getCalledFunction() would be nullptr.
177 const bool IsInstrinsicCall =
178 !IsInlineAsm && !IsIndirectFunctionCall &&
179 CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
181 if (!IsInlineAsm && !IsInstrinsicCall)
182 return CI->getCallingConv();
188 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
189 const SDValue *Parts, unsigned NumParts,
190 MVT PartVT, EVT ValueVT, const Value *V,
191 Optional<CallingConv::ID> CC);
193 /// getCopyFromParts - Create a value that contains the specified legal parts
194 /// combined into the value they represent. If the parts combine to a type
195 /// larger than ValueVT then AssertOp can be used to specify whether the extra
196 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
197 /// (ISD::AssertSext).
198 static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
199 const SDValue *Parts, unsigned NumParts,
200 MVT PartVT, EVT ValueVT, const Value *V,
201 Optional<CallingConv::ID> CC = None,
202 Optional<ISD::NodeType> AssertOp = None) {
203 if (ValueVT.isVector())
204 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
207 assert(NumParts > 0 && "No parts to assemble!");
208 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
209 SDValue Val = Parts[0];
212 // Assemble the value from multiple parts.
213 if (ValueVT.isInteger()) {
214 unsigned PartBits = PartVT.getSizeInBits();
215 unsigned ValueBits = ValueVT.getSizeInBits();
217 // Assemble the power of 2 part.
218 unsigned RoundParts = NumParts & (NumParts - 1) ?
219 1 << Log2_32(NumParts) : NumParts;
220 unsigned RoundBits = PartBits * RoundParts;
221 EVT RoundVT = RoundBits == ValueBits ?
222 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
225 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
227 if (RoundParts > 2) {
228 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
230 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
231 RoundParts / 2, PartVT, HalfVT, V);
233 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
234 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
237 if (DAG.getDataLayout().isBigEndian())
240 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
242 if (RoundParts < NumParts) {
243 // Assemble the trailing non-power-of-2 part.
244 unsigned OddParts = NumParts - RoundParts;
245 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
246 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
249 // Combine the round and odd parts.
251 if (DAG.getDataLayout().isBigEndian())
253 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
254 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
256 DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
257 DAG.getConstant(Lo.getValueSizeInBits(), DL,
258 TLI.getPointerTy(DAG.getDataLayout())));
259 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
260 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
262 } else if (PartVT.isFloatingPoint()) {
263 // FP split into multiple FP parts (for ppcf128)
264 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
267 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
268 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
269 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
271 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
273 // FP split into integer parts (soft fp)
274 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
275 !PartVT.isVector() && "Unexpected split");
276 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
277 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
281 // There is now one part, held in Val. Correct it to match ValueVT.
282 // PartEVT is the type of the register class that holds the value.
283 // ValueVT is the type of the inline asm operation.
284 EVT PartEVT = Val.getValueType();
286 if (PartEVT == ValueVT)
289 if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
290 ValueVT.bitsLT(PartEVT)) {
291 // For an FP value in an integer part, we need to truncate to the right
293 PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
294 Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
297 // Handle types that have the same size.
298 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
299 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
301 // Handle types with different sizes.
302 if (PartEVT.isInteger() && ValueVT.isInteger()) {
303 if (ValueVT.bitsLT(PartEVT)) {
304 // For a truncate, see if we have any information to
305 // indicate whether the truncated bits will always be
306 // zero or sign-extension.
307 if (AssertOp.hasValue())
308 Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
309 DAG.getValueType(ValueVT));
310 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
312 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
315 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
316 // FP_ROUND's are always exact here.
317 if (ValueVT.bitsLT(Val.getValueType()))
319 ISD::FP_ROUND, DL, ValueVT, Val,
320 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
322 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
325 llvm_unreachable("Unknown mismatch!");
328 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
329 const Twine &ErrMsg) {
330 const Instruction *I = dyn_cast_or_null<Instruction>(V);
332 return Ctx.emitError(ErrMsg);
334 const char *AsmError = ", possible invalid constraint for vector type";
335 if (const CallInst *CI = dyn_cast<CallInst>(I))
336 if (isa<InlineAsm>(CI->getCalledValue()))
337 return Ctx.emitError(I, ErrMsg + AsmError);
339 return Ctx.emitError(I, ErrMsg);
342 /// getCopyFromPartsVector - Create a value that contains the specified legal
343 /// parts combined into the value they represent. If the parts combine to a
344 /// type larger than ValueVT then AssertOp can be used to specify whether the
345 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
346 /// ValueVT (ISD::AssertSext).
347 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
348 const SDValue *Parts, unsigned NumParts,
349 MVT PartVT, EVT ValueVT, const Value *V,
350 Optional<CallingConv::ID> CallConv) {
351 assert(ValueVT.isVector() && "Not a vector value");
352 assert(NumParts > 0 && "No parts to assemble!");
353 const bool IsABIRegCopy = CallConv.hasValue();
355 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
356 SDValue Val = Parts[0];
358 // Handle a multi-element vector.
362 unsigned NumIntermediates;
366 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
367 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
368 NumIntermediates, RegisterVT);
371 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
372 NumIntermediates, RegisterVT);
375 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
376 NumParts = NumRegs; // Silence a compiler warning.
377 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
378 assert(RegisterVT.getSizeInBits() ==
379 Parts[0].getSimpleValueType().getSizeInBits() &&
380 "Part type sizes don't match!");
382 // Assemble the parts into intermediate operands.
383 SmallVector<SDValue, 8> Ops(NumIntermediates);
384 if (NumIntermediates == NumParts) {
385 // If the register was not expanded, truncate or copy the value,
387 for (unsigned i = 0; i != NumParts; ++i)
388 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
389 PartVT, IntermediateVT, V);
390 } else if (NumParts > 0) {
391 // If the intermediate type was expanded, build the intermediate
392 // operands from the parts.
393 assert(NumParts % NumIntermediates == 0 &&
394 "Must expand into a divisible number of parts!");
395 unsigned Factor = NumParts / NumIntermediates;
396 for (unsigned i = 0; i != NumIntermediates; ++i)
397 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
398 PartVT, IntermediateVT, V);
401 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
402 // intermediate operands.
404 EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
405 (IntermediateVT.isVector()
406 ? IntermediateVT.getVectorNumElements() * NumParts
407 : NumIntermediates));
408 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
410 DL, BuiltVectorTy, Ops);
413 // There is now one part, held in Val. Correct it to match ValueVT.
414 EVT PartEVT = Val.getValueType();
416 if (PartEVT == ValueVT)
419 if (PartEVT.isVector()) {
420 // If the element type of the source/dest vectors are the same, but the
421 // parts vector has more elements than the value vector, then we have a
422 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
424 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
425 assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
426 "Cannot narrow, it would be a lossy transformation");
428 ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
429 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
432 // Vector/Vector bitcast.
433 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
434 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
436 assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
437 "Cannot handle this kind of promotion");
438 // Promoted vector extract
439 return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
443 // Trivial bitcast if the types are the same size and the destination
444 // vector type is legal.
445 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
446 TLI.isTypeLegal(ValueVT))
447 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
449 if (ValueVT.getVectorNumElements() != 1) {
450 // Certain ABIs require that vectors are passed as integers. For vectors
451 // are the same size, this is an obvious bitcast.
452 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
453 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
454 } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
455 // Bitcast Val back the original type and extract the corresponding
457 unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
458 EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
459 ValueVT.getVectorElementType(), Elts);
460 Val = DAG.getBitcast(WiderVecType, Val);
462 ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
463 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
466 diagnosePossiblyInvalidConstraint(
467 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
468 return DAG.getUNDEF(ValueVT);
471 // Handle cases such as i8 -> <1 x i1>
472 EVT ValueSVT = ValueVT.getVectorElementType();
473 if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
474 Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
475 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
477 return DAG.getBuildVector(ValueVT, DL, Val);
480 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
481 SDValue Val, SDValue *Parts, unsigned NumParts,
482 MVT PartVT, const Value *V,
483 Optional<CallingConv::ID> CallConv);
485 /// getCopyToParts - Create a series of nodes that contain the specified value
486 /// split into legal parts. If the parts contain more bits than Val, then, for
487 /// integers, ExtendKind can be used to specify how to generate the extra bits.
488 static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
489 SDValue *Parts, unsigned NumParts, MVT PartVT,
491 Optional<CallingConv::ID> CallConv = None,
492 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
493 EVT ValueVT = Val.getValueType();
495 // Handle the vector case separately.
496 if (ValueVT.isVector())
497 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
500 unsigned PartBits = PartVT.getSizeInBits();
501 unsigned OrigNumParts = NumParts;
502 assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
503 "Copying to an illegal type!");
508 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
509 EVT PartEVT = PartVT;
510 if (PartEVT == ValueVT) {
511 assert(NumParts == 1 && "No-op copy with multiple parts!");
516 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
517 // If the parts cover more bits than the value has, promote the value.
518 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
519 assert(NumParts == 1 && "Do not know what to promote to!");
520 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
522 if (ValueVT.isFloatingPoint()) {
523 // FP values need to be bitcast, then extended if they are being put
524 // into a larger container.
525 ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
526 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
528 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
529 ValueVT.isInteger() &&
530 "Unknown mismatch!");
531 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
532 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
533 if (PartVT == MVT::x86mmx)
534 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
536 } else if (PartBits == ValueVT.getSizeInBits()) {
537 // Different types of the same size.
538 assert(NumParts == 1 && PartEVT != ValueVT);
539 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
540 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
541 // If the parts cover less bits than value has, truncate the value.
542 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
543 ValueVT.isInteger() &&
544 "Unknown mismatch!");
545 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
546 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
547 if (PartVT == MVT::x86mmx)
548 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
551 // The value may have changed - recompute ValueVT.
552 ValueVT = Val.getValueType();
553 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
554 "Failed to tile the value with PartVT!");
557 if (PartEVT != ValueVT) {
558 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
559 "scalar-to-vector conversion failed");
560 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
567 // Expand the value into multiple parts.
568 if (NumParts & (NumParts - 1)) {
569 // The number of parts is not a power of 2. Split off and copy the tail.
570 assert(PartVT.isInteger() && ValueVT.isInteger() &&
571 "Do not know what to expand to!");
572 unsigned RoundParts = 1 << Log2_32(NumParts);
573 unsigned RoundBits = RoundParts * PartBits;
574 unsigned OddParts = NumParts - RoundParts;
575 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
576 DAG.getIntPtrConstant(RoundBits, DL));
577 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
580 if (DAG.getDataLayout().isBigEndian())
581 // The odd parts were reversed by getCopyToParts - unreverse them.
582 std::reverse(Parts + RoundParts, Parts + NumParts);
584 NumParts = RoundParts;
585 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
586 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
589 // The number of parts is a power of 2. Repeatedly bisect the value using
591 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
592 EVT::getIntegerVT(*DAG.getContext(),
593 ValueVT.getSizeInBits()),
596 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
597 for (unsigned i = 0; i < NumParts; i += StepSize) {
598 unsigned ThisBits = StepSize * PartBits / 2;
599 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
600 SDValue &Part0 = Parts[i];
601 SDValue &Part1 = Parts[i+StepSize/2];
603 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
604 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
605 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
606 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
608 if (ThisBits == PartBits && ThisVT != PartVT) {
609 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
610 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
615 if (DAG.getDataLayout().isBigEndian())
616 std::reverse(Parts, Parts + OrigNumParts);
619 static SDValue widenVectorToPartType(SelectionDAG &DAG,
620 SDValue Val, const SDLoc &DL, EVT PartVT) {
621 if (!PartVT.isVector())
624 EVT ValueVT = Val.getValueType();
625 unsigned PartNumElts = PartVT.getVectorNumElements();
626 unsigned ValueNumElts = ValueVT.getVectorNumElements();
627 if (PartNumElts > ValueNumElts &&
628 PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
629 EVT ElementVT = PartVT.getVectorElementType();
630 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
632 SmallVector<SDValue, 16> Ops;
633 DAG.ExtractVectorElements(Val, Ops);
634 SDValue EltUndef = DAG.getUNDEF(ElementVT);
635 for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
636 Ops.push_back(EltUndef);
638 // FIXME: Use CONCAT for 2x -> 4x.
639 return DAG.getBuildVector(PartVT, DL, Ops);
645 /// getCopyToPartsVector - Create a series of nodes that contain the specified
646 /// value split into legal parts.
647 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
648 SDValue Val, SDValue *Parts, unsigned NumParts,
649 MVT PartVT, const Value *V,
650 Optional<CallingConv::ID> CallConv) {
651 EVT ValueVT = Val.getValueType();
652 assert(ValueVT.isVector() && "Not a vector");
653 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
654 const bool IsABIRegCopy = CallConv.hasValue();
657 EVT PartEVT = PartVT;
658 if (PartEVT == ValueVT) {
660 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
661 // Bitconvert vector->vector case.
662 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
663 } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
665 } else if (PartVT.isVector() &&
666 PartEVT.getVectorElementType().bitsGE(
667 ValueVT.getVectorElementType()) &&
668 PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
670 // Promoted vector extract
671 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
673 if (ValueVT.getVectorNumElements() == 1) {
675 ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
676 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
678 assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
679 "lossy conversion of vector to scalar type");
680 EVT IntermediateType =
681 EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
682 Val = DAG.getBitcast(IntermediateType, Val);
683 Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
687 assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
692 // Handle a multi-element vector.
695 unsigned NumIntermediates;
698 NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
699 *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
700 NumIntermediates, RegisterVT);
703 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
704 NumIntermediates, RegisterVT);
707 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
708 NumParts = NumRegs; // Silence a compiler warning.
709 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
711 unsigned IntermediateNumElts = IntermediateVT.isVector() ?
712 IntermediateVT.getVectorNumElements() : 1;
714 // Convert the vector to the appropiate type if necessary.
715 unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;
717 EVT BuiltVectorTy = EVT::getVectorVT(
718 *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
719 MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
720 if (ValueVT != BuiltVectorTy) {
721 if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
724 Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
727 // Split the vector into intermediate operands.
728 SmallVector<SDValue, 8> Ops(NumIntermediates);
729 for (unsigned i = 0; i != NumIntermediates; ++i) {
730 if (IntermediateVT.isVector()) {
731 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
732 DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
734 Ops[i] = DAG.getNode(
735 ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
736 DAG.getConstant(i, DL, IdxVT));
740 // Split the intermediate operands into legal parts.
741 if (NumParts == NumIntermediates) {
742 // If the register was not expanded, promote or copy the value,
744 for (unsigned i = 0; i != NumParts; ++i)
745 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
746 } else if (NumParts > 0) {
747 // If the intermediate type was expanded, split each the value into
749 assert(NumIntermediates != 0 && "division by zero");
750 assert(NumParts % NumIntermediates == 0 &&
751 "Must expand into a divisible number of parts!");
752 unsigned Factor = NumParts / NumIntermediates;
753 for (unsigned i = 0; i != NumIntermediates; ++i)
754 getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
759 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> ®s, MVT regvt,
760 EVT valuevt, Optional<CallingConv::ID> CC)
761 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
762 RegCount(1, regs.size()), CallConv(CC) {}
764 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
765 const DataLayout &DL, unsigned Reg, Type *Ty,
766 Optional<CallingConv::ID> CC) {
767 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
771 for (EVT ValueVT : ValueVTs) {
774 ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
775 : TLI.getNumRegisters(Context, ValueVT);
778 ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
779 : TLI.getRegisterType(Context, ValueVT);
780 for (unsigned i = 0; i != NumRegs; ++i)
781 Regs.push_back(Reg + i);
782 RegVTs.push_back(RegisterVT);
783 RegCount.push_back(NumRegs);
788 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
789 FunctionLoweringInfo &FuncInfo,
790 const SDLoc &dl, SDValue &Chain,
791 SDValue *Flag, const Value *V) const {
792 // A Value with type {} or [0 x %t] needs no registers.
793 if (ValueVTs.empty())
796 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
798 // Assemble the legal parts into the final values.
799 SmallVector<SDValue, 4> Values(ValueVTs.size());
800 SmallVector<SDValue, 8> Parts;
801 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
802 // Copy the legal parts from the registers.
803 EVT ValueVT = ValueVTs[Value];
804 unsigned NumRegs = RegCount[Value];
805 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
807 CallConv.getValue(), RegVTs[Value])
810 Parts.resize(NumRegs);
811 for (unsigned i = 0; i != NumRegs; ++i) {
814 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
816 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
817 *Flag = P.getValue(2);
820 Chain = P.getValue(1);
823 // If the source register was virtual and if we know something about it,
824 // add an assert node.
825 if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
826 !RegisterVT.isInteger())
829 const FunctionLoweringInfo::LiveOutInfo *LOI =
830 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
834 unsigned RegSize = RegisterVT.getScalarSizeInBits();
835 unsigned NumSignBits = LOI->NumSignBits;
836 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
838 if (NumZeroBits == RegSize) {
839 // The current value is a zero.
840 // Explicitly express that as it would be easier for
841 // optimizations to kick in.
842 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
846 // FIXME: We capture more information than the dag can represent. For
847 // now, just use the tightest assertzext/assertsext possible.
849 EVT FromVT(MVT::Other);
851 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
853 } else if (NumSignBits > 1) {
855 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
860 // Add an assertion node.
861 assert(FromVT != MVT::Other);
862 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
863 RegisterVT, P, DAG.getValueType(FromVT));
866 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
867 RegisterVT, ValueVT, V, CallConv);
872 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
875 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
876 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
878 ISD::NodeType PreferredExtendType) const {
879 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
880 ISD::NodeType ExtendKind = PreferredExtendType;
882 // Get the list of the values's legal parts.
883 unsigned NumRegs = Regs.size();
884 SmallVector<SDValue, 8> Parts(NumRegs);
885 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
886 unsigned NumParts = RegCount[Value];
888 MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
890 CallConv.getValue(), RegVTs[Value])
893 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
894 ExtendKind = ISD::ZERO_EXTEND;
896 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
897 NumParts, RegisterVT, V, CallConv, ExtendKind);
901 // Copy the parts into the registers.
902 SmallVector<SDValue, 8> Chains(NumRegs);
903 for (unsigned i = 0; i != NumRegs; ++i) {
906 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
908 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
909 *Flag = Part.getValue(1);
912 Chains[i] = Part.getValue(0);
915 if (NumRegs == 1 || Flag)
916 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
917 // flagged to it. That is the CopyToReg nodes and the user are considered
918 // a single scheduling unit. If we create a TokenFactor and return it as
919 // chain, then the TokenFactor is both a predecessor (operand) of the
920 // user as well as a successor (the TF operands are flagged to the user).
921 // c1, f1 = CopyToReg
922 // c2, f2 = CopyToReg
923 // c3 = TokenFactor c1, c2
926 Chain = Chains[NumRegs-1];
928 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
931 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
932 unsigned MatchingIdx, const SDLoc &dl,
934 std::vector<SDValue> &Ops) const {
935 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
937 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
939 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
940 else if (!Regs.empty() &&
941 TargetRegisterInfo::isVirtualRegister(Regs.front())) {
942 // Put the register class of the virtual registers in the flag word. That
943 // way, later passes can recompute register class constraints for inline
944 // assembly as well as normal instructions.
945 // Don't do this for tied operands that can use the regclass information
947 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
948 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
949 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
952 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
955 if (Code == InlineAsm::Kind_Clobber) {
956 // Clobbers should always have a 1:1 mapping with registers, and may
957 // reference registers that have illegal (e.g. vector) types. Hence, we
958 // shouldn't try to apply any sort of splitting logic to them.
959 assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
960 "No 1:1 mapping from clobbers to regs?");
961 unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
963 for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
964 Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
967 DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
968 "If we clobbered the stack pointer, MFI should know about it.");
973 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
974 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
975 MVT RegisterVT = RegVTs[Value];
976 for (unsigned i = 0; i != NumRegs; ++i) {
977 assert(Reg < Regs.size() && "Mismatch in # registers expected");
978 unsigned TheReg = Regs[Reg++];
979 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
984 SmallVector<std::pair<unsigned, unsigned>, 4>
985 RegsForValue::getRegsAndSizes() const {
986 SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
988 for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
989 unsigned RegCount = std::get<0>(CountAndVT);
990 MVT RegisterVT = std::get<1>(CountAndVT);
991 unsigned RegisterSize = RegisterVT.getSizeInBits();
992 for (unsigned E = I + RegCount; I != E; ++I)
993 OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
998 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
999 const TargetLibraryInfo *li) {
1003 DL = &DAG.getDataLayout();
1004 Context = DAG.getContext();
1005 LPadToCallSiteMap.clear();
1008 void SelectionDAGBuilder::clear() {
1010 UnusedArgNodeMap.clear();
1011 PendingLoads.clear();
1012 PendingExports.clear();
1014 HasTailCall = false;
1015 SDNodeOrder = LowestSDNodeOrder;
1016 StatepointLowering.clear();
1019 void SelectionDAGBuilder::clearDanglingDebugInfo() {
1020 DanglingDebugInfoMap.clear();
1023 SDValue SelectionDAGBuilder::getRoot() {
1024 if (PendingLoads.empty())
1025 return DAG.getRoot();
1027 if (PendingLoads.size() == 1) {
1028 SDValue Root = PendingLoads[0];
1030 PendingLoads.clear();
1034 // Otherwise, we have to make a token factor node.
1035 // If we have >= 2^16 loads then split across multiple token factors as
1036 // there's a 64k limit on the number of SDNode operands.
1038 size_t Limit = (1 << 16) - 1;
1039 while (PendingLoads.size() > Limit) {
1040 unsigned SliceIdx = PendingLoads.size() - Limit;
1041 auto ExtractedTFs = ArrayRef<SDValue>(PendingLoads).slice(SliceIdx, Limit);
1043 DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, ExtractedTFs);
1044 PendingLoads.erase(PendingLoads.begin() + SliceIdx, PendingLoads.end());
1045 PendingLoads.emplace_back(NewTF);
1047 Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, PendingLoads);
1048 PendingLoads.clear();
1053 SDValue SelectionDAGBuilder::getControlRoot() {
1054 SDValue Root = DAG.getRoot();
1056 if (PendingExports.empty())
1059 // Turn all of the CopyToReg chains into one factored node.
1060 if (Root.getOpcode() != ISD::EntryToken) {
1061 unsigned i = 0, e = PendingExports.size();
1062 for (; i != e; ++i) {
1063 assert(PendingExports[i].getNode()->getNumOperands() > 1);
1064 if (PendingExports[i].getNode()->getOperand(0) == Root)
1065 break; // Don't add the root if we already indirectly depend on it.
1069 PendingExports.push_back(Root);
1072 Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
1074 PendingExports.clear();
1079 void SelectionDAGBuilder::visit(const Instruction &I) {
1080 // Set up outgoing PHI node register values before emitting the terminator.
1081 if (I.isTerminator()) {
1082 HandlePHINodesInSuccessorBlocks(I.getParent());
1085 // Increase the SDNodeOrder if dealing with a non-debug instruction.
1086 if (!isa<DbgInfoIntrinsic>(I))
1091 visit(I.getOpcode(), I);
1093 if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
1094 // Propagate the fast-math-flags of this IR instruction to the DAG node that
1095 // maps to this instruction.
1096 // TODO: We could handle all flags (nsw, etc) here.
1097 // TODO: If an IR instruction maps to >1 node, only the final node will have
1099 if (SDNode *Node = getNodeForIRValue(&I)) {
1100 SDNodeFlags IncomingFlags;
1101 IncomingFlags.copyFMF(*FPMO);
1102 if (!Node->getFlags().isDefined())
1103 Node->setFlags(IncomingFlags);
1105 Node->intersectFlagsWith(IncomingFlags);
1109 if (!I.isTerminator() && !HasTailCall &&
1110 !isStatepoint(&I)) // statepoints handle their exports internally
1111 CopyToExportRegsIfNeeded(&I);
1116 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1117 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1120 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1121 // Note: this doesn't use InstVisitor, because it has to work with
1122 // ConstantExpr's in addition to instructions.
1124 default: llvm_unreachable("Unknown instruction type encountered!");
1125 // Build the switch statement using the Instruction.def file.
1126 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1127 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1128 #include "llvm/IR/Instruction.def"
1132 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1133 const DIExpression *Expr) {
1134 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1135 const DbgValueInst *DI = DDI.getDI();
1136 DIVariable *DanglingVariable = DI->getVariable();
1137 DIExpression *DanglingExpr = DI->getExpression();
1138 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1139 LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
1145 for (auto &DDIMI : DanglingDebugInfoMap) {
1146 DanglingDebugInfoVector &DDIV = DDIMI.second;
1147 DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
1151 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1152 // generate the debug data structures now that we've seen its definition.
1153 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1155 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1156 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1159 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1160 for (auto &DDI : DDIV) {
1161 const DbgValueInst *DI = DDI.getDI();
1162 assert(DI && "Ill-formed DanglingDebugInfo");
1163 DebugLoc dl = DDI.getdl();
1164 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1165 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1166 DILocalVariable *Variable = DI->getVariable();
1167 DIExpression *Expr = DI->getExpression();
1168 assert(Variable->isValidLocationForIntrinsic(dl) &&
1169 "Expected inlined-at fields to agree");
1171 if (Val.getNode()) {
1172 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
1173 LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
1174 << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
1175 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1176 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1177 // inserted after the definition of Val when emitting the instructions
1178 // after ISel. An alternative could be to teach
1179 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1180 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1181 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1182 << ValSDNodeOrder << "\n");
1183 SDV = getDbgValue(Val, Variable, Expr, dl,
1184 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1185 DAG.AddDbgValue(SDV, Val.getNode(), false);
1187 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
1188 << "in EmitFuncArgumentDbgValue\n");
1190 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1195 /// getCopyFromRegs - If there was virtual register allocated for the value V
1196 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1197 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1198 DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
1201 if (It != FuncInfo.ValueMap.end()) {
1202 unsigned InReg = It->second;
1204 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1205 DAG.getDataLayout(), InReg, Ty,
1206 None); // This is not an ABI copy.
1207 SDValue Chain = DAG.getEntryNode();
1208 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1210 resolveDanglingDebugInfo(V, Result);
1216 /// getValue - Return an SDValue for the given Value.
1217 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1218 // If we already have an SDValue for this value, use it. It's important
1219 // to do this first, so that we don't create a CopyFromReg if we already
1220 // have a regular SDValue.
1221 SDValue &N = NodeMap[V];
1222 if (N.getNode()) return N;
1224 // If there's a virtual register allocated and initialized for this
1226 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1229 // Otherwise create a new SDValue and remember it.
1230 SDValue Val = getValueImpl(V);
1232 resolveDanglingDebugInfo(V, Val);
1236 // Return true if SDValue exists for the given Value
1237 bool SelectionDAGBuilder::findValue(const Value *V) const {
1238 return (NodeMap.find(V) != NodeMap.end()) ||
1239 (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1242 /// getNonRegisterValue - Return an SDValue for the given Value, but
1243 /// don't look in FuncInfo.ValueMap for a virtual register.
1244 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1245 // If we already have an SDValue for this value, use it.
1246 SDValue &N = NodeMap[V];
1248 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1249 // Remove the debug location from the node as the node is about to be used
1250 // in a location which may differ from the original debug location. This
1251 // is relevant to Constant and ConstantFP nodes because they can appear
1252 // as constant expressions inside PHI nodes.
1253 N->setDebugLoc(DebugLoc());
1258 // Otherwise create a new SDValue and remember it.
1259 SDValue Val = getValueImpl(V);
1261 resolveDanglingDebugInfo(V, Val);
1265 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1266 /// Create an SDValue for the given value.
1267 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1268 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1270 if (const Constant *C = dyn_cast<Constant>(V)) {
1271 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1273 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1274 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1276 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1277 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1279 if (isa<ConstantPointerNull>(C)) {
1280 unsigned AS = V->getType()->getPointerAddressSpace();
1281 return DAG.getConstant(0, getCurSDLoc(),
1282 TLI.getPointerTy(DAG.getDataLayout(), AS));
1285 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1286 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1288 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1289 return DAG.getUNDEF(VT);
1291 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1292 visit(CE->getOpcode(), *CE);
1293 SDValue N1 = NodeMap[V];
1294 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1298 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1299 SmallVector<SDValue, 4> Constants;
1300 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1302 SDNode *Val = getValue(*OI).getNode();
1303 // If the operand is an empty aggregate, there are no values.
1305 // Add each leaf value from the operand to the Constants list
1306 // to form a flattened list of all the values.
1307 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1308 Constants.push_back(SDValue(Val, i));
1311 return DAG.getMergeValues(Constants, getCurSDLoc());
1314 if (const ConstantDataSequential *CDS =
1315 dyn_cast<ConstantDataSequential>(C)) {
1316 SmallVector<SDValue, 4> Ops;
1317 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1318 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1319 // Add each leaf value from the operand to the Constants list
1320 // to form a flattened list of all the values.
1321 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1322 Ops.push_back(SDValue(Val, i));
1325 if (isa<ArrayType>(CDS->getType()))
1326 return DAG.getMergeValues(Ops, getCurSDLoc());
1327 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1330 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1331 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1332 "Unknown struct or array constant!");
1334 SmallVector<EVT, 4> ValueVTs;
1335 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1336 unsigned NumElts = ValueVTs.size();
1338 return SDValue(); // empty struct
1339 SmallVector<SDValue, 4> Constants(NumElts);
1340 for (unsigned i = 0; i != NumElts; ++i) {
1341 EVT EltVT = ValueVTs[i];
1342 if (isa<UndefValue>(C))
1343 Constants[i] = DAG.getUNDEF(EltVT);
1344 else if (EltVT.isFloatingPoint())
1345 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1347 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1350 return DAG.getMergeValues(Constants, getCurSDLoc());
1353 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1354 return DAG.getBlockAddress(BA, VT);
1356 VectorType *VecTy = cast<VectorType>(V->getType());
1357 unsigned NumElements = VecTy->getNumElements();
1359 // Now that we know the number and type of the elements, get that number of
1360 // elements into the Ops array based on what kind of constant it is.
1361 SmallVector<SDValue, 16> Ops;
1362 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1363 for (unsigned i = 0; i != NumElements; ++i)
1364 Ops.push_back(getValue(CV->getOperand(i)));
1366 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1368 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1371 if (EltVT.isFloatingPoint())
1372 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1374 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1375 Ops.assign(NumElements, Op);
1378 // Create a BUILD_VECTOR node.
1379 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1382 // If this is a static alloca, generate it as the frameindex instead of
1384 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1385 DenseMap<const AllocaInst*, int>::iterator SI =
1386 FuncInfo.StaticAllocaMap.find(AI);
1387 if (SI != FuncInfo.StaticAllocaMap.end())
1388 return DAG.getFrameIndex(SI->second,
1389 TLI.getFrameIndexTy(DAG.getDataLayout()));
1392 // If this is an instruction which fast-isel has deferred, select it now.
1393 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1394 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1396 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1397 Inst->getType(), getABIRegCopyCC(V));
1398 SDValue Chain = DAG.getEntryNode();
1399 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1402 llvm_unreachable("Can't get register for value!");
1405 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1406 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1407 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1408 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1409 bool IsSEH = isAsynchronousEHPersonality(Pers);
1410 bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
1411 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1413 CatchPadMBB->setIsEHScopeEntry();
1414 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1415 if (IsMSVCCXX || IsCoreCLR)
1416 CatchPadMBB->setIsEHFuncletEntry();
1417 // Wasm does not need catchpads anymore
1419 DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
1423 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1424 // Update machine-CFG edge.
1425 MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1426 FuncInfo.MBB->addSuccessor(TargetMBB);
1428 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1429 bool IsSEH = isAsynchronousEHPersonality(Pers);
1431 // If this is not a fall-through branch or optimizations are switched off,
1433 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1434 TM.getOptLevel() == CodeGenOpt::None)
1435 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1436 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1440 // Figure out the funclet membership for the catchret's successor.
1441 // This will be used by the FuncletLayout pass to determine how to order the
1443 // A 'catchret' returns to the outer scope's color.
1444 Value *ParentPad = I.getCatchSwitchParentPad();
1445 const BasicBlock *SuccessorColor;
1446 if (isa<ConstantTokenNone>(ParentPad))
1447 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1449 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1450 assert(SuccessorColor && "No parent funclet for catchret!");
1451 MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1452 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1454 // Create the terminator node.
1455 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1456 getControlRoot(), DAG.getBasicBlock(TargetMBB),
1457 DAG.getBasicBlock(SuccessorColorMBB));
1461 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1462 // Don't emit any special code for the cleanuppad instruction. It just marks
1463 // the start of an EH scope/funclet.
1464 FuncInfo.MBB->setIsEHScopeEntry();
1465 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1466 if (Pers != EHPersonality::Wasm_CXX) {
1467 FuncInfo.MBB->setIsEHFuncletEntry();
1468 FuncInfo.MBB->setIsCleanupFuncletEntry();
1472 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1473 /// many places it could ultimately go. In the IR, we have a single unwind
1474 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1475 /// This function skips over imaginary basic blocks that hold catchswitch
1476 /// instructions, and finds all the "real" machine
1477 /// basic block destinations. As those destinations may not be successors of
1478 /// EHPadBB, here we also calculate the edge probability to those destinations.
1479 /// The passed-in Prob is the edge probability to EHPadBB.
1480 static void findUnwindDestinations(
1481 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1482 BranchProbability Prob,
1483 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1485 EHPersonality Personality =
1486 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1487 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1488 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1489 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1490 bool IsSEH = isAsynchronousEHPersonality(Personality);
1493 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1494 BasicBlock *NewEHPadBB = nullptr;
1495 if (isa<LandingPadInst>(Pad)) {
1496 // Stop on landingpads. They are not funclets.
1497 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1499 } else if (isa<CleanupPadInst>(Pad)) {
1500 // Stop on cleanup pads. Cleanups are always funclet entries for all known
1502 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1503 UnwindDests.back().first->setIsEHScopeEntry();
1505 UnwindDests.back().first->setIsEHFuncletEntry();
1507 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1508 // Add the catchpad handlers to the possible destinations.
1509 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1510 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1511 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1512 if (IsMSVCCXX || IsCoreCLR)
1513 UnwindDests.back().first->setIsEHFuncletEntry();
1515 UnwindDests.back().first->setIsEHScopeEntry();
1517 NewEHPadBB = CatchSwitch->getUnwindDest();
1522 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1523 if (BPI && NewEHPadBB)
1524 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1525 EHPadBB = NewEHPadBB;
1529 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1530 // Update successor info.
1531 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1532 auto UnwindDest = I.getUnwindDest();
1533 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1534 BranchProbability UnwindDestProb =
1536 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1537 : BranchProbability::getZero();
1538 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1539 for (auto &UnwindDest : UnwindDests) {
1540 UnwindDest.first->setIsEHPad();
1541 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1543 FuncInfo.MBB->normalizeSuccProbs();
1545 // Create the terminator node.
1547 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1551 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1552 report_fatal_error("visitCatchSwitch not yet implemented!");
1555 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1556 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1557 auto &DL = DAG.getDataLayout();
1558 SDValue Chain = getControlRoot();
1559 SmallVector<ISD::OutputArg, 8> Outs;
1560 SmallVector<SDValue, 8> OutVals;
1562 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1565 // %val = call <ty> @llvm.experimental.deoptimize()
1569 if (I.getParent()->getTerminatingDeoptimizeCall()) {
1570 LowerDeoptimizingReturn();
1574 if (!FuncInfo.CanLowerReturn) {
1575 unsigned DemoteReg = FuncInfo.DemoteRegister;
1576 const Function *F = I.getParent()->getParent();
1578 // Emit a store of the return value through the virtual register.
1579 // Leave Outs empty so that LowerReturn won't try to load return
1580 // registers the usual way.
1581 SmallVector<EVT, 1> PtrValueVTs;
1582 ComputeValueVTs(TLI, DL,
1583 F->getReturnType()->getPointerTo(
1584 DAG.getDataLayout().getAllocaAddrSpace()),
1587 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1588 DemoteReg, PtrValueVTs[0]);
1589 SDValue RetOp = getValue(I.getOperand(0));
1591 SmallVector<EVT, 4> ValueVTs;
1592 SmallVector<uint64_t, 4> Offsets;
1593 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1594 unsigned NumValues = ValueVTs.size();
1596 SmallVector<SDValue, 4> Chains(NumValues);
1597 for (unsigned i = 0; i != NumValues; ++i) {
1598 // An aggregate return value cannot wrap around the address space, so
1599 // offsets to its parts don't wrap either.
1600 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1601 Chains[i] = DAG.getStore(
1602 Chain, getCurSDLoc(), SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1603 // FIXME: better loc info would be nice.
1604 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1607 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1608 MVT::Other, Chains);
1609 } else if (I.getNumOperands() != 0) {
1610 SmallVector<EVT, 4> ValueVTs;
1611 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1612 unsigned NumValues = ValueVTs.size();
1614 SDValue RetOp = getValue(I.getOperand(0));
1616 const Function *F = I.getParent()->getParent();
1618 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1619 if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1621 ExtendKind = ISD::SIGN_EXTEND;
1622 else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1624 ExtendKind = ISD::ZERO_EXTEND;
1626 LLVMContext &Context = F->getContext();
1627 bool RetInReg = F->getAttributes().hasAttribute(
1628 AttributeList::ReturnIndex, Attribute::InReg);
1630 for (unsigned j = 0; j != NumValues; ++j) {
1631 EVT VT = ValueVTs[j];
1633 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1634 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1636 CallingConv::ID CC = F->getCallingConv();
1638 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1639 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1640 SmallVector<SDValue, 4> Parts(NumParts);
1641 getCopyToParts(DAG, getCurSDLoc(),
1642 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1643 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1645 // 'inreg' on function refers to return value
1646 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1650 // Propagate extension type if any
1651 if (ExtendKind == ISD::SIGN_EXTEND)
1653 else if (ExtendKind == ISD::ZERO_EXTEND)
1656 for (unsigned i = 0; i < NumParts; ++i) {
1657 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1658 VT, /*isfixed=*/true, 0, 0));
1659 OutVals.push_back(Parts[i]);
1665 // Push in swifterror virtual register as the last element of Outs. This makes
1666 // sure swifterror virtual register will be returned in the swifterror
1667 // physical register.
1668 const Function *F = I.getParent()->getParent();
1669 if (TLI.supportSwiftError() &&
1670 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1671 assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1672 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1673 Flags.setSwiftError();
1674 Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1675 EVT(TLI.getPointerTy(DL)) /*argvt*/,
1676 true /*isfixed*/, 1 /*origidx*/,
1678 // Create SDNode for the swifterror virtual register.
1680 DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
1681 &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
1682 EVT(TLI.getPointerTy(DL))));
1685 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1686 CallingConv::ID CallConv =
1687 DAG.getMachineFunction().getFunction().getCallingConv();
1688 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1689 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1691 // Verify that the target's LowerReturn behaved as expected.
1692 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1693 "LowerReturn didn't return a valid chain!");
1695 // Update the DAG with the new chain value resulting from return lowering.
1699 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1700 /// created for it, emit nodes to copy the value into the virtual
1702 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1704 if (V->getType()->isEmptyTy())
1707 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1708 if (VMI != FuncInfo.ValueMap.end()) {
1709 assert(!V->use_empty() && "Unused value assigned virtual registers!");
1710 CopyValueToVirtualRegister(V, VMI->second);
1714 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1715 /// the current basic block, add it to ValueMap now so that we'll get a
1717 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1718 // No need to export constants.
1719 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1721 // Already exported?
1722 if (FuncInfo.isExportedInst(V)) return;
1724 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1725 CopyValueToVirtualRegister(V, Reg);
1728 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1729 const BasicBlock *FromBB) {
1730 // The operands of the setcc have to be in this block. We don't know
1731 // how to export them from some other block.
1732 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1733 // Can export from current BB.
1734 if (VI->getParent() == FromBB)
1737 // Is already exported, noop.
1738 return FuncInfo.isExportedInst(V);
1741 // If this is an argument, we can export it if the BB is the entry block or
1742 // if it is already exported.
1743 if (isa<Argument>(V)) {
1744 if (FromBB == &FromBB->getParent()->getEntryBlock())
1747 // Otherwise, can only export this if it is already exported.
1748 return FuncInfo.isExportedInst(V);
1751 // Otherwise, constants can always be exported.
1755 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1757 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1758 const MachineBasicBlock *Dst) const {
1759 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1760 const BasicBlock *SrcBB = Src->getBasicBlock();
1761 const BasicBlock *DstBB = Dst->getBasicBlock();
1763 // If BPI is not available, set the default probability as 1 / N, where N is
1764 // the number of successors.
1765 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
1766 return BranchProbability(1, SuccSize);
1768 return BPI->getEdgeProbability(SrcBB, DstBB);
1771 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1772 MachineBasicBlock *Dst,
1773 BranchProbability Prob) {
1775 Src->addSuccessorWithoutProb(Dst);
1777 if (Prob.isUnknown())
1778 Prob = getEdgeProbability(Src, Dst);
1779 Src->addSuccessor(Dst, Prob);
1783 static bool InBlock(const Value *V, const BasicBlock *BB) {
1784 if (const Instruction *I = dyn_cast<Instruction>(V))
1785 return I->getParent() == BB;
1789 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1790 /// This function emits a branch and is used at the leaves of an OR or an
1791 /// AND operator tree.
1793 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1794 MachineBasicBlock *TBB,
1795 MachineBasicBlock *FBB,
1796 MachineBasicBlock *CurBB,
1797 MachineBasicBlock *SwitchBB,
1798 BranchProbability TProb,
1799 BranchProbability FProb,
1801 const BasicBlock *BB = CurBB->getBasicBlock();
1803 // If the leaf of the tree is a comparison, merge the condition into
1805 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1806 // The operands of the cmp have to be in this block. We don't know
1807 // how to export them from some other block. If this is the first block
1808 // of the sequence, no exporting is needed.
1809 if (CurBB == SwitchBB ||
1810 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1811 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1812 ISD::CondCode Condition;
1813 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1814 ICmpInst::Predicate Pred =
1815 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1816 Condition = getICmpCondCode(Pred);
1818 const FCmpInst *FC = cast<FCmpInst>(Cond);
1819 FCmpInst::Predicate Pred =
1820 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1821 Condition = getFCmpCondCode(Pred);
1822 if (TM.Options.NoNaNsFPMath)
1823 Condition = getFCmpCodeWithoutNaN(Condition);
1826 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1827 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1828 SwitchCases.push_back(CB);
1833 // Create a CaseBlock record representing this branch.
1834 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1835 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1836 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1837 SwitchCases.push_back(CB);
1840 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1841 MachineBasicBlock *TBB,
1842 MachineBasicBlock *FBB,
1843 MachineBasicBlock *CurBB,
1844 MachineBasicBlock *SwitchBB,
1845 Instruction::BinaryOps Opc,
1846 BranchProbability TProb,
1847 BranchProbability FProb,
1849 // Skip over not part of the tree and remember to invert op and operands at
1852 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
1853 InBlock(NotCond, CurBB->getBasicBlock())) {
1854 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1859 const Instruction *BOp = dyn_cast<Instruction>(Cond);
1860 // Compute the effective opcode for Cond, taking into account whether it needs
1861 // to be inverted, e.g.
1862 // and (not (or A, B)), C
1864 // and (and (not A, not B), C)
1867 BOpc = BOp->getOpcode();
1869 if (BOpc == Instruction::And)
1870 BOpc = Instruction::Or;
1871 else if (BOpc == Instruction::Or)
1872 BOpc = Instruction::And;
1876 // If this node is not part of the or/and tree, emit it as a branch.
1877 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1878 BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
1879 BOp->getParent() != CurBB->getBasicBlock() ||
1880 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1881 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1882 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1883 TProb, FProb, InvertCond);
1887 // Create TmpBB after CurBB.
1888 MachineFunction::iterator BBI(CurBB);
1889 MachineFunction &MF = DAG.getMachineFunction();
1890 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1891 CurBB->getParent()->insert(++BBI, TmpBB);
1893 if (Opc == Instruction::Or) {
1894 // Codegen X | Y as:
1903 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1904 // The requirement is that
1905 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1906 // = TrueProb for original BB.
1907 // Assuming the original probabilities are A and B, one choice is to set
1908 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1909 // A/(1+B) and 2B/(1+B). This choice assumes that
1910 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1911 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1912 // TmpBB, but the math is more complicated.
1914 auto NewTrueProb = TProb / 2;
1915 auto NewFalseProb = TProb / 2 + FProb;
1916 // Emit the LHS condition.
1917 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1918 NewTrueProb, NewFalseProb, InvertCond);
1920 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1921 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1922 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1923 // Emit the RHS condition into TmpBB.
1924 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1925 Probs[0], Probs[1], InvertCond);
1927 assert(Opc == Instruction::And && "Unknown merge op!");
1928 // Codegen X & Y as:
1936 // This requires creation of TmpBB after CurBB.
1938 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1939 // The requirement is that
1940 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1941 // = FalseProb for original BB.
1942 // Assuming the original probabilities are A and B, one choice is to set
1943 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1944 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1945 // TrueProb for BB1 * FalseProb for TmpBB.
1947 auto NewTrueProb = TProb + FProb / 2;
1948 auto NewFalseProb = FProb / 2;
1949 // Emit the LHS condition.
1950 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1951 NewTrueProb, NewFalseProb, InvertCond);
1953 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1954 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1955 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1956 // Emit the RHS condition into TmpBB.
1957 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1958 Probs[0], Probs[1], InvertCond);
1962 /// If the set of cases should be emitted as a series of branches, return true.
1963 /// If we should emit this as a bunch of and/or'd together conditions, return
1966 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1967 if (Cases.size() != 2) return true;
1969 // If this is two comparisons of the same values or'd or and'd together, they
1970 // will get folded into a single comparison, so don't emit two blocks.
1971 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1972 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1973 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1974 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1978 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1979 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1980 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1981 Cases[0].CC == Cases[1].CC &&
1982 isa<Constant>(Cases[0].CmpRHS) &&
1983 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1984 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1986 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1993 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1994 MachineBasicBlock *BrMBB = FuncInfo.MBB;
1996 // Update machine-CFG edges.
1997 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1999 if (I.isUnconditional()) {
2000 // Update machine-CFG edges.
2001 BrMBB->addSuccessor(Succ0MBB);
2003 // If this is not a fall-through branch or optimizations are switched off,
2005 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2006 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2007 MVT::Other, getControlRoot(),
2008 DAG.getBasicBlock(Succ0MBB)));
2013 // If this condition is one of the special cases we handle, do special stuff
2015 const Value *CondVal = I.getCondition();
2016 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2018 // If this is a series of conditions that are or'd or and'd together, emit
2019 // this as a sequence of branches instead of setcc's with and/or operations.
2020 // As long as jumps are not expensive, this should improve performance.
2021 // For example, instead of something like:
2033 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2034 Instruction::BinaryOps Opcode = BOp->getOpcode();
2035 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2036 !I.getMetadata(LLVMContext::MD_unpredictable) &&
2037 (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2038 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2040 getEdgeProbability(BrMBB, Succ0MBB),
2041 getEdgeProbability(BrMBB, Succ1MBB),
2042 /*InvertCond=*/false);
2043 // If the compares in later blocks need to use values not currently
2044 // exported from this block, export them now. This block should always
2045 // be the first entry.
2046 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2048 // Allow some cases to be rejected.
2049 if (ShouldEmitAsBranches(SwitchCases)) {
2050 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
2051 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
2052 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
2055 // Emit the branch for this block.
2056 visitSwitchCase(SwitchCases[0], BrMBB);
2057 SwitchCases.erase(SwitchCases.begin());
2061 // Okay, we decided not to do this, remove any inserted MBB's and clear
2063 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
2064 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
2066 SwitchCases.clear();
2070 // Create a CaseBlock record representing this branch.
2071 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2072 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2074 // Use visitSwitchCase to actually insert the fast branch sequence for this
2076 visitSwitchCase(CB, BrMBB);
2079 /// visitSwitchCase - Emits the necessary code to represent a single node in
2080 /// the binary search tree resulting from lowering a switch instruction.
2081 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2082 MachineBasicBlock *SwitchBB) {
2084 SDValue CondLHS = getValue(CB.CmpLHS);
2087 // Build the setcc now.
2089 // Fold "(X == true)" to X and "(X == false)" to !X to
2090 // handle common cases produced by branch lowering.
2091 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2092 CB.CC == ISD::SETEQ)
2094 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2095 CB.CC == ISD::SETEQ) {
2096 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2097 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2099 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
2101 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2103 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2104 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2106 SDValue CmpOp = getValue(CB.CmpMHS);
2107 EVT VT = CmpOp.getValueType();
2109 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2110 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2113 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2114 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2115 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2116 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2120 // Update successor info
2121 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2122 // TrueBB and FalseBB are always different unless the incoming IR is
2123 // degenerate. This only happens when running llc on weird IR.
2124 if (CB.TrueBB != CB.FalseBB)
2125 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2126 SwitchBB->normalizeSuccProbs();
2128 // If the lhs block is the next block, invert the condition so that we can
2129 // fall through to the lhs instead of the rhs block.
2130 if (CB.TrueBB == NextBlock(SwitchBB)) {
2131 std::swap(CB.TrueBB, CB.FalseBB);
2132 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2133 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2136 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2137 MVT::Other, getControlRoot(), Cond,
2138 DAG.getBasicBlock(CB.TrueBB));
2140 // Insert the false branch. Do this even if it's a fall through branch,
2141 // this makes it easier to do DAG optimizations which require inverting
2142 // the branch condition.
2143 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2144 DAG.getBasicBlock(CB.FalseBB));
2146 DAG.setRoot(BrCond);
2149 /// visitJumpTable - Emit JumpTable node in the current MBB
2150 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
2151 // Emit the code for the jump table
2152 assert(JT.Reg != -1U && "Should lower JT Header first!");
2153 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2154 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2156 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2157 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2158 MVT::Other, Index.getValue(1),
2160 DAG.setRoot(BrJumpTable);
2163 /// visitJumpTableHeader - This function emits necessary code to produce index
2164 /// in the JumpTable from switch case.
2165 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
2166 JumpTableHeader &JTH,
2167 MachineBasicBlock *SwitchBB) {
2168 SDLoc dl = getCurSDLoc();
2170 // Subtract the lowest switch case value from the value being switched on and
2171 // conditional branch to default mbb if the result is greater than the
2172 // difference between smallest and largest cases.
2173 SDValue SwitchOp = getValue(JTH.SValue);
2174 EVT VT = SwitchOp.getValueType();
2175 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2176 DAG.getConstant(JTH.First, dl, VT));
2178 // The SDNode we just created, which holds the value being switched on minus
2179 // the smallest case value, needs to be copied to a virtual register so it
2180 // can be used as an index into the jump table in a subsequent basic block.
2181 // This value may be smaller or larger than the target's pointer type, and
2182 // therefore require extension or truncating.
2183 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2184 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2186 unsigned JumpTableReg =
2187 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2188 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2189 JumpTableReg, SwitchOp);
2190 JT.Reg = JumpTableReg;
2192 // Emit the range check for the jump table, and branch to the default block
2193 // for the switch statement if the value being switched on exceeds the largest
2194 // case in the switch.
2195 SDValue CMP = DAG.getSetCC(
2196 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2197 Sub.getValueType()),
2198 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2200 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2201 MVT::Other, CopyTo, CMP,
2202 DAG.getBasicBlock(JT.Default));
2204 // Avoid emitting unnecessary branches to the next block.
2205 if (JT.MBB != NextBlock(SwitchBB))
2206 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2207 DAG.getBasicBlock(JT.MBB));
2209 DAG.setRoot(BrCond);
2212 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2213 /// variable if there exists one.
2214 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2216 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2217 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2218 MachineFunction &MF = DAG.getMachineFunction();
2219 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2220 MachineSDNode *Node =
2221 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2223 MachinePointerInfo MPInfo(Global);
2224 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2225 MachineMemOperand::MODereferenceable;
2226 MachineMemOperand *MemRef = MF.getMachineMemOperand(
2227 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
2228 DAG.setNodeMemRefs(Node, {MemRef});
2230 return SDValue(Node, 0);
2233 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2234 /// tail spliced into a stack protector check success bb.
2236 /// For a high level explanation of how this fits into the stack protector
2237 /// generation see the comment on the declaration of class
2238 /// StackProtectorDescriptor.
2239 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2240 MachineBasicBlock *ParentBB) {
2242 // First create the loads to the guard/stack slot for the comparison.
2243 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2244 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2246 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2247 int FI = MFI.getStackProtectorIndex();
2250 SDLoc dl = getCurSDLoc();
2251 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2252 const Module &M = *ParentBB->getParent()->getFunction().getParent();
2253 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2255 // Generate code to load the content of the guard slot.
2256 SDValue GuardVal = DAG.getLoad(
2257 PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2258 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2259 MachineMemOperand::MOVolatile);
2261 if (TLI.useStackGuardXorFP())
2262 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2264 // Retrieve guard check function, nullptr if instrumentation is inlined.
2265 if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2266 // The target provides a guard check function to validate the guard value.
2267 // Generate a call to that function with the content of the guard slot as
2269 auto *Fn = cast<Function>(GuardCheck);
2270 FunctionType *FnTy = Fn->getFunctionType();
2271 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2273 TargetLowering::ArgListTy Args;
2274 TargetLowering::ArgListEntry Entry;
2275 Entry.Node = GuardVal;
2276 Entry.Ty = FnTy->getParamType(0);
2277 if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2278 Entry.IsInReg = true;
2279 Args.push_back(Entry);
2281 TargetLowering::CallLoweringInfo CLI(DAG);
2282 CLI.setDebugLoc(getCurSDLoc())
2283 .setChain(DAG.getEntryNode())
2284 .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2285 getValue(GuardCheck), std::move(Args));
2287 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2288 DAG.setRoot(Result.second);
2292 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2293 // Otherwise, emit a volatile load to retrieve the stack guard value.
2294 SDValue Chain = DAG.getEntryNode();
2295 if (TLI.useLoadStackGuardNode()) {
2296 Guard = getLoadStackGuard(DAG, dl, Chain);
2298 const Value *IRGuard = TLI.getSDagStackGuard(M);
2299 SDValue GuardPtr = getValue(IRGuard);
2302 DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2303 Align, MachineMemOperand::MOVolatile);
2306 // Perform the comparison via a subtract/getsetcc.
2307 EVT VT = Guard.getValueType();
2308 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
2310 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2312 Sub.getValueType()),
2313 Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2315 // If the sub is not 0, then we know the guard/stackslot do not equal, so
2316 // branch to failure MBB.
2317 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2318 MVT::Other, GuardVal.getOperand(0),
2319 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2320 // Otherwise branch to success MBB.
2321 SDValue Br = DAG.getNode(ISD::BR, dl,
2323 DAG.getBasicBlock(SPD.getSuccessMBB()));
2328 /// Codegen the failure basic block for a stack protector check.
2330 /// A failure stack protector machine basic block consists simply of a call to
2331 /// __stack_chk_fail().
2333 /// For a high level explanation of how this fits into the stack protector
2334 /// generation see the comment on the declaration of class
2335 /// StackProtectorDescriptor.
2337 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2338 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2340 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2341 None, false, getCurSDLoc(), false, false).second;
2345 /// visitBitTestHeader - This function emits necessary code to produce value
2346 /// suitable for "bit tests"
2347 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2348 MachineBasicBlock *SwitchBB) {
2349 SDLoc dl = getCurSDLoc();
2351 // Subtract the minimum value
2352 SDValue SwitchOp = getValue(B.SValue);
2353 EVT VT = SwitchOp.getValueType();
2354 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2355 DAG.getConstant(B.First, dl, VT));
2358 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2359 SDValue RangeCmp = DAG.getSetCC(
2360 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2361 Sub.getValueType()),
2362 Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2364 // Determine the type of the test operands.
2365 bool UsePtrType = false;
2366 if (!TLI.isTypeLegal(VT))
2369 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2370 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2371 // Switch table case range are encoded into series of masks.
2372 // Just use pointer type, it's guaranteed to fit.
2378 VT = TLI.getPointerTy(DAG.getDataLayout());
2379 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2382 B.RegVT = VT.getSimpleVT();
2383 B.Reg = FuncInfo.CreateReg(B.RegVT);
2384 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2386 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2388 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2389 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2390 SwitchBB->normalizeSuccProbs();
2392 SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2393 MVT::Other, CopyTo, RangeCmp,
2394 DAG.getBasicBlock(B.Default));
2396 // Avoid emitting unnecessary branches to the next block.
2397 if (MBB != NextBlock(SwitchBB))
2398 BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2399 DAG.getBasicBlock(MBB));
2401 DAG.setRoot(BrRange);
2404 /// visitBitTestCase - this function produces one "bit test"
2405 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2406 MachineBasicBlock* NextMBB,
2407 BranchProbability BranchProbToNext,
2410 MachineBasicBlock *SwitchBB) {
2411 SDLoc dl = getCurSDLoc();
2413 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2415 unsigned PopCount = countPopulation(B.Mask);
2416 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2417 if (PopCount == 1) {
2418 // Testing for a single bit; just compare the shift count with what it
2419 // would need to be to shift a 1 bit in that position.
2421 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2422 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2424 } else if (PopCount == BB.Range) {
2425 // There is only one zero bit in the range, test for it directly.
2427 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2428 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2431 // Make desired shift
2432 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2433 DAG.getConstant(1, dl, VT), ShiftOp);
2435 // Emit bit tests and jumps
2436 SDValue AndOp = DAG.getNode(ISD::AND, dl,
2437 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2439 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2440 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2443 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2444 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2445 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2446 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2447 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2448 // one as they are relative probabilities (and thus work more like weights),
2449 // and hence we need to normalize them to let the sum of them become one.
2450 SwitchBB->normalizeSuccProbs();
2452 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2453 MVT::Other, getControlRoot(),
2454 Cmp, DAG.getBasicBlock(B.TargetBB));
2456 // Avoid emitting unnecessary branches to the next block.
2457 if (NextMBB != NextBlock(SwitchBB))
2458 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2459 DAG.getBasicBlock(NextMBB));
2464 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2465 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2467 // Retrieve successors. Look through artificial IR level blocks like
2468 // catchswitch for successors.
2469 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2470 const BasicBlock *EHPadBB = I.getSuccessor(1);
2472 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2473 // have to do anything here to lower funclet bundles.
2474 assert(!I.hasOperandBundlesOtherThan(
2475 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2476 "Cannot lower invokes with arbitrary operand bundles yet!");
2478 const Value *Callee(I.getCalledValue());
2479 const Function *Fn = dyn_cast<Function>(Callee);
2480 if (isa<InlineAsm>(Callee))
2482 else if (Fn && Fn->isIntrinsic()) {
2483 switch (Fn->getIntrinsicID()) {
2485 llvm_unreachable("Cannot invoke this intrinsic");
2486 case Intrinsic::donothing:
2487 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2489 case Intrinsic::experimental_patchpoint_void:
2490 case Intrinsic::experimental_patchpoint_i64:
2491 visitPatchpoint(&I, EHPadBB);
2493 case Intrinsic::experimental_gc_statepoint:
2494 LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2497 } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2498 // Currently we do not lower any intrinsic calls with deopt operand bundles.
2499 // Eventually we will support lowering the @llvm.experimental.deoptimize
2500 // intrinsic, and right now there are no plans to support other intrinsics
2501 // with deopt state.
2502 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2504 LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2507 // If the value of the invoke is used outside of its defining block, make it
2508 // available as a virtual register.
2509 // We already took care of the exported value for the statepoint instruction
2510 // during call to the LowerStatepoint.
2511 if (!isStatepoint(I)) {
2512 CopyToExportRegsIfNeeded(&I);
2515 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2516 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2517 BranchProbability EHPadBBProb =
2518 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2519 : BranchProbability::getZero();
2520 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2522 // Update successor info.
2523 addSuccessorWithProb(InvokeMBB, Return);
2524 for (auto &UnwindDest : UnwindDests) {
2525 UnwindDest.first->setIsEHPad();
2526 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2528 InvokeMBB->normalizeSuccProbs();
2530 // Drop into normal successor.
2531 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2532 MVT::Other, getControlRoot(),
2533 DAG.getBasicBlock(Return)));
2536 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2537 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2540 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2541 assert(FuncInfo.MBB->isEHPad() &&
2542 "Call to landingpad not in landing pad!");
2544 // If there aren't registers to copy the values into (e.g., during SjLj
2545 // exceptions), then don't bother to create these DAG nodes.
2546 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2547 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2548 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2549 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2552 // If landingpad's return type is token type, we don't create DAG nodes
2553 // for its exception pointer and selector value. The extraction of exception
2554 // pointer or selector value from token type landingpads is not currently
2556 if (LP.getType()->isTokenTy())
2559 SmallVector<EVT, 2> ValueVTs;
2560 SDLoc dl = getCurSDLoc();
2561 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2562 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2564 // Get the two live-in registers as SDValues. The physregs have already been
2565 // copied into virtual registers.
2567 if (FuncInfo.ExceptionPointerVirtReg) {
2568 Ops[0] = DAG.getZExtOrTrunc(
2569 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2570 FuncInfo.ExceptionPointerVirtReg,
2571 TLI.getPointerTy(DAG.getDataLayout())),
2574 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2576 Ops[1] = DAG.getZExtOrTrunc(
2577 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2578 FuncInfo.ExceptionSelectorVirtReg,
2579 TLI.getPointerTy(DAG.getDataLayout())),
2583 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2584 DAG.getVTList(ValueVTs), Ops);
2588 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2590 for (const CaseCluster &CC : Clusters)
2591 assert(CC.Low == CC.High && "Input clusters must be single-case");
2594 llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
2595 return a.Low->getValue().slt(b.Low->getValue());
2598 // Merge adjacent clusters with the same destination.
2599 const unsigned N = Clusters.size();
2600 unsigned DstIndex = 0;
2601 for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2602 CaseCluster &CC = Clusters[SrcIndex];
2603 const ConstantInt *CaseVal = CC.Low;
2604 MachineBasicBlock *Succ = CC.MBB;
2606 if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2607 (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2608 // If this case has the same successor and is a neighbour, merge it into
2609 // the previous cluster.
2610 Clusters[DstIndex - 1].High = CaseVal;
2611 Clusters[DstIndex - 1].Prob += CC.Prob;
2613 std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2614 sizeof(Clusters[SrcIndex]));
2617 Clusters.resize(DstIndex);
2620 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2621 MachineBasicBlock *Last) {
2623 for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2624 if (JTCases[i].first.HeaderBB == First)
2625 JTCases[i].first.HeaderBB = Last;
2627 // Update BitTestCases.
2628 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2629 if (BitTestCases[i].Parent == First)
2630 BitTestCases[i].Parent = Last;
2633 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2634 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2636 // Update machine-CFG edges with unique successors.
2637 SmallSet<BasicBlock*, 32> Done;
2638 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2639 BasicBlock *BB = I.getSuccessor(i);
2640 bool Inserted = Done.insert(BB).second;
2644 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2645 addSuccessorWithProb(IndirectBrMBB, Succ);
2647 IndirectBrMBB->normalizeSuccProbs();
2649 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2650 MVT::Other, getControlRoot(),
2651 getValue(I.getAddress())));
2654 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2655 if (!DAG.getTarget().Options.TrapUnreachable)
2658 // We may be able to ignore unreachable behind a noreturn call.
2659 if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2660 const BasicBlock &BB = *I.getParent();
2661 if (&I != &BB.front()) {
2662 BasicBlock::const_iterator PredI =
2663 std::prev(BasicBlock::const_iterator(&I));
2664 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2665 if (Call->doesNotReturn())
2671 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2674 void SelectionDAGBuilder::visitFSub(const User &I) {
2675 // -0.0 - X --> fneg
2676 Type *Ty = I.getType();
2677 if (isa<Constant>(I.getOperand(0)) &&
2678 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2679 SDValue Op2 = getValue(I.getOperand(1));
2680 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2681 Op2.getValueType(), Op2));
2685 visitBinary(I, ISD::FSUB);
2688 /// Checks if the given instruction performs a vector reduction, in which case
2689 /// we have the freedom to alter the elements in the result as long as the
2690 /// reduction of them stays unchanged.
2691 static bool isVectorReductionOp(const User *I) {
2692 const Instruction *Inst = dyn_cast<Instruction>(I);
2693 if (!Inst || !Inst->getType()->isVectorTy())
2696 auto OpCode = Inst->getOpcode();
2698 case Instruction::Add:
2699 case Instruction::Mul:
2700 case Instruction::And:
2701 case Instruction::Or:
2702 case Instruction::Xor:
2704 case Instruction::FAdd:
2705 case Instruction::FMul:
2706 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2707 if (FPOp->getFastMathFlags().isFast())
2714 unsigned ElemNum = Inst->getType()->getVectorNumElements();
2715 // Ensure the reduction size is a power of 2.
2716 if (!isPowerOf2_32(ElemNum))
2719 unsigned ElemNumToReduce = ElemNum;
2721 // Do DFS search on the def-use chain from the given instruction. We only
2722 // allow four kinds of operations during the search until we reach the
2723 // instruction that extracts the first element from the vector:
2725 // 1. The reduction operation of the same opcode as the given instruction.
2729 // 3. ShuffleVector instruction together with a reduction operation that
2730 // does a partial reduction.
2732 // 4. ExtractElement that extracts the first element from the vector, and we
2733 // stop searching the def-use chain here.
2735 // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2736 // from 1-3 to the stack to continue the DFS. The given instruction is not
2737 // a reduction operation if we meet any other instructions other than those
2740 SmallVector<const User *, 16> UsersToVisit{Inst};
2741 SmallPtrSet<const User *, 16> Visited;
2742 bool ReduxExtracted = false;
2744 while (!UsersToVisit.empty()) {
2745 auto User = UsersToVisit.back();
2746 UsersToVisit.pop_back();
2747 if (!Visited.insert(User).second)
2750 for (const auto &U : User->users()) {
2751 auto Inst = dyn_cast<Instruction>(U);
2755 if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2756 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2757 if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
2759 UsersToVisit.push_back(U);
2760 } else if (const ShuffleVectorInst *ShufInst =
2761 dyn_cast<ShuffleVectorInst>(U)) {
2762 // Detect the following pattern: A ShuffleVector instruction together
2763 // with a reduction that do partial reduction on the first and second
2764 // ElemNumToReduce / 2 elements, and store the result in
2765 // ElemNumToReduce / 2 elements in another vector.
2767 unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2768 if (ResultElements < ElemNum)
2771 if (ElemNumToReduce == 1)
2773 if (!isa<UndefValue>(U->getOperand(1)))
2775 for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2776 if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2778 for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2779 if (ShufInst->getMaskValue(i) != -1)
2782 // There is only one user of this ShuffleVector instruction, which
2783 // must be a reduction operation.
2784 if (!U->hasOneUse())
2787 auto U2 = dyn_cast<Instruction>(*U->user_begin());
2788 if (!U2 || U2->getOpcode() != OpCode)
2791 // Check operands of the reduction operation.
2792 if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2793 (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2794 UsersToVisit.push_back(U2);
2795 ElemNumToReduce /= 2;
2798 } else if (isa<ExtractElementInst>(U)) {
2799 // At this moment we should have reduced all elements in the vector.
2800 if (ElemNumToReduce != 1)
2803 const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2804 if (!Val || !Val->isZero())
2807 ReduxExtracted = true;
2812 return ReduxExtracted;
2815 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
2818 SDValue Op = getValue(I.getOperand(0));
2819 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
2821 setValue(&I, UnNodeValue);
2824 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
2826 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
2827 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
2828 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
2830 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
2831 Flags.setExact(ExactOp->isExact());
2833 if (isVectorReductionOp(&I)) {
2834 Flags.setVectorReduction(true);
2835 LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2838 SDValue Op1 = getValue(I.getOperand(0));
2839 SDValue Op2 = getValue(I.getOperand(1));
2840 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
2842 setValue(&I, BinNodeValue);
2845 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2846 SDValue Op1 = getValue(I.getOperand(0));
2847 SDValue Op2 = getValue(I.getOperand(1));
2849 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2850 Op1.getValueType(), DAG.getDataLayout());
2852 // Coerce the shift amount to the right type if we can.
2853 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2854 unsigned ShiftSize = ShiftTy.getSizeInBits();
2855 unsigned Op2Size = Op2.getValueSizeInBits();
2856 SDLoc DL = getCurSDLoc();
2858 // If the operand is smaller than the shift count type, promote it.
2859 if (ShiftSize > Op2Size)
2860 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2862 // If the operand is larger than the shift count type but the shift
2863 // count type has enough bits to represent any shift value, truncate
2864 // it now. This is a common case and it exposes the truncate to
2865 // optimization early.
2866 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2867 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2868 // Otherwise we'll need to temporarily settle for some other convenient
2869 // type. Type legalization will make adjustments once the shiftee is split.
2871 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2878 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2880 if (const OverflowingBinaryOperator *OFBinOp =
2881 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2882 nuw = OFBinOp->hasNoUnsignedWrap();
2883 nsw = OFBinOp->hasNoSignedWrap();
2885 if (const PossiblyExactOperator *ExactOp =
2886 dyn_cast<const PossiblyExactOperator>(&I))
2887 exact = ExactOp->isExact();
2890 Flags.setExact(exact);
2891 Flags.setNoSignedWrap(nsw);
2892 Flags.setNoUnsignedWrap(nuw);
2893 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2898 void SelectionDAGBuilder::visitSDiv(const User &I) {
2899 SDValue Op1 = getValue(I.getOperand(0));
2900 SDValue Op2 = getValue(I.getOperand(1));
2903 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2904 cast<PossiblyExactOperator>(&I)->isExact());
2905 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2909 void SelectionDAGBuilder::visitICmp(const User &I) {
2910 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2911 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2912 predicate = IC->getPredicate();
2913 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2914 predicate = ICmpInst::Predicate(IC->getPredicate());
2915 SDValue Op1 = getValue(I.getOperand(0));
2916 SDValue Op2 = getValue(I.getOperand(1));
2917 ISD::CondCode Opcode = getICmpCondCode(predicate);
2919 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2921 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2924 void SelectionDAGBuilder::visitFCmp(const User &I) {
2925 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2926 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2927 predicate = FC->getPredicate();
2928 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2929 predicate = FCmpInst::Predicate(FC->getPredicate());
2930 SDValue Op1 = getValue(I.getOperand(0));
2931 SDValue Op2 = getValue(I.getOperand(1));
2933 ISD::CondCode Condition = getFCmpCondCode(predicate);
2934 auto *FPMO = dyn_cast<FPMathOperator>(&I);
2935 if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
2936 Condition = getFCmpCodeWithoutNaN(Condition);
2938 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2940 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2943 // Check if the condition of the select has one use or two users that are both
2944 // selects with the same condition.
2945 static bool hasOnlySelectUsers(const Value *Cond) {
2946 return llvm::all_of(Cond->users(), [](const Value *V) {
2947 return isa<SelectInst>(V);
2951 void SelectionDAGBuilder::visitSelect(const User &I) {
2952 SmallVector<EVT, 4> ValueVTs;
2953 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2955 unsigned NumValues = ValueVTs.size();
2956 if (NumValues == 0) return;
2958 SmallVector<SDValue, 4> Values(NumValues);
2959 SDValue Cond = getValue(I.getOperand(0));
2960 SDValue LHSVal = getValue(I.getOperand(1));
2961 SDValue RHSVal = getValue(I.getOperand(2));
2962 auto BaseOps = {Cond};
2963 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2964 ISD::VSELECT : ISD::SELECT;
2966 // Min/max matching is only viable if all output VTs are the same.
2967 if (is_splat(ValueVTs)) {
2968 EVT VT = ValueVTs[0];
2969 LLVMContext &Ctx = *DAG.getContext();
2970 auto &TLI = DAG.getTargetLoweringInfo();
2972 // We care about the legality of the operation after it has been type
2974 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2975 VT != TLI.getTypeToTransformTo(Ctx, VT))
2976 VT = TLI.getTypeToTransformTo(Ctx, VT);
2978 // If the vselect is legal, assume we want to leave this as a vector setcc +
2979 // vselect. Otherwise, if this is going to be scalarized, we want to see if
2980 // min/max is legal on the scalar type.
2981 bool UseScalarMinMax = VT.isVector() &&
2982 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2985 auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2986 ISD::NodeType Opc = ISD::DELETED_NODE;
2987 switch (SPR.Flavor) {
2988 case SPF_UMAX: Opc = ISD::UMAX; break;
2989 case SPF_UMIN: Opc = ISD::UMIN; break;
2990 case SPF_SMAX: Opc = ISD::SMAX; break;
2991 case SPF_SMIN: Opc = ISD::SMIN; break;
2993 switch (SPR.NaNBehavior) {
2994 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2995 case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break;
2996 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2997 case SPNB_RETURNS_ANY: {
2998 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3000 else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3001 Opc = ISD::FMINIMUM;
3002 else if (UseScalarMinMax)
3003 Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3004 ISD::FMINNUM : ISD::FMINIMUM;
3010 switch (SPR.NaNBehavior) {
3011 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3012 case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break;
3013 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3014 case SPNB_RETURNS_ANY:
3016 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3018 else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3019 Opc = ISD::FMAXIMUM;
3020 else if (UseScalarMinMax)
3021 Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3022 ISD::FMAXNUM : ISD::FMAXIMUM;
3029 if (Opc != ISD::DELETED_NODE &&
3030 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3032 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3033 // If the underlying comparison instruction is used by any other
3034 // instruction, the consumed instructions won't be destroyed, so it is
3035 // not profitable to convert to a min/max.
3036 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3038 LHSVal = getValue(LHS);
3039 RHSVal = getValue(RHS);
3044 for (unsigned i = 0; i != NumValues; ++i) {
3045 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3046 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3047 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3048 Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
3049 LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
3053 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3054 DAG.getVTList(ValueVTs), Values));
3057 void SelectionDAGBuilder::visitTrunc(const User &I) {
3058 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3059 SDValue N = getValue(I.getOperand(0));
3060 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3062 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3065 void SelectionDAGBuilder::visitZExt(const User &I) {
3066 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3067 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
3068 SDValue N = getValue(I.getOperand(0));
3069 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3071 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3074 void SelectionDAGBuilder::visitSExt(const User &I) {
3075 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3076 // SExt also can't be a cast to bool for same reason. So, nothing much to do
3077 SDValue N = getValue(I.getOperand(0));
3078 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3080 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3083 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3084 // FPTrunc is never a no-op cast, no need to check
3085 SDValue N = getValue(I.getOperand(0));
3086 SDLoc dl = getCurSDLoc();
3087 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3088 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3089 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3090 DAG.getTargetConstant(
3091 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3094 void SelectionDAGBuilder::visitFPExt(const User &I) {
3095 // FPExt is never a no-op cast, no need to check
3096 SDValue N = getValue(I.getOperand(0));
3097 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3099 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3102 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3103 // FPToUI is never a no-op cast, no need to check
3104 SDValue N = getValue(I.getOperand(0));
3105 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3107 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3110 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3111 // FPToSI is never a no-op cast, no need to check
3112 SDValue N = getValue(I.getOperand(0));
3113 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3115 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3118 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3119 // UIToFP is never a no-op cast, no need to check
3120 SDValue N = getValue(I.getOperand(0));
3121 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3123 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3126 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3127 // SIToFP is never a no-op cast, no need to check
3128 SDValue N = getValue(I.getOperand(0));
3129 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3131 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3134 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3135 // What to do depends on the size of the integer and the size of the pointer.
3136 // We can either truncate, zero extend, or no-op, accordingly.
3137 SDValue N = getValue(I.getOperand(0));
3138 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3140 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3143 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3144 // What to do depends on the size of the integer and the size of the pointer.
3145 // We can either truncate, zero extend, or no-op, accordingly.
3146 SDValue N = getValue(I.getOperand(0));
3147 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3149 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3152 void SelectionDAGBuilder::visitBitCast(const User &I) {
3153 SDValue N = getValue(I.getOperand(0));
3154 SDLoc dl = getCurSDLoc();
3155 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3158 // BitCast assures us that source and destination are the same size so this is
3159 // either a BITCAST or a no-op.
3160 if (DestVT != N.getValueType())
3161 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3162 DestVT, N)); // convert types.
3163 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3164 // might fold any kind of constant expression to an integer constant and that
3165 // is not what we are looking for. Only recognize a bitcast of a genuine
3166 // constant integer as an opaque constant.
3167 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3168 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3171 setValue(&I, N); // noop cast.
3174 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3175 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3176 const Value *SV = I.getOperand(0);
3177 SDValue N = getValue(SV);
3178 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3180 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3181 unsigned DestAS = I.getType()->getPointerAddressSpace();
3183 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3184 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3189 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3190 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3191 SDValue InVec = getValue(I.getOperand(0));
3192 SDValue InVal = getValue(I.getOperand(1));
3193 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3194 TLI.getVectorIdxTy(DAG.getDataLayout()));
3195 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3196 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3197 InVec, InVal, InIdx));
3200 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3201 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3202 SDValue InVec = getValue(I.getOperand(0));
3203 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3204 TLI.getVectorIdxTy(DAG.getDataLayout()));
3205 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3206 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3210 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3211 SDValue Src1 = getValue(I.getOperand(0));
3212 SDValue Src2 = getValue(I.getOperand(1));
3213 SDLoc DL = getCurSDLoc();
3215 SmallVector<int, 8> Mask;
3216 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3217 unsigned MaskNumElts = Mask.size();
3219 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3220 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3221 EVT SrcVT = Src1.getValueType();
3222 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3224 if (SrcNumElts == MaskNumElts) {
3225 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3229 // Normalize the shuffle vector since mask and vector length don't match.
3230 if (SrcNumElts < MaskNumElts) {
3231 // Mask is longer than the source vectors. We can use concatenate vector to
3232 // make the mask and vectors lengths match.
3234 if (MaskNumElts % SrcNumElts == 0) {
3235 // Mask length is a multiple of the source vector length.
3236 // Check if the shuffle is some kind of concatenation of the input
3238 unsigned NumConcat = MaskNumElts / SrcNumElts;
3239 bool IsConcat = true;
3240 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3241 for (unsigned i = 0; i != MaskNumElts; ++i) {
3245 // Ensure the indices in each SrcVT sized piece are sequential and that
3246 // the same source is used for the whole piece.
3247 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3248 (ConcatSrcs[i / SrcNumElts] >= 0 &&
3249 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3253 // Remember which source this index came from.
3254 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3257 // The shuffle is concatenating multiple vectors together. Just emit
3258 // a CONCAT_VECTORS operation.
3260 SmallVector<SDValue, 8> ConcatOps;
3261 for (auto Src : ConcatSrcs) {
3263 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3265 ConcatOps.push_back(Src1);
3267 ConcatOps.push_back(Src2);
3269 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3274 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3275 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3276 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3279 // Pad both vectors with undefs to make them the same length as the mask.
3280 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3282 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3283 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3287 Src1 = Src1.isUndef()
3288 ? DAG.getUNDEF(PaddedVT)
3289 : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3290 Src2 = Src2.isUndef()
3291 ? DAG.getUNDEF(PaddedVT)
3292 : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3294 // Readjust mask for new input vector length.
3295 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3296 for (unsigned i = 0; i != MaskNumElts; ++i) {
3298 if (Idx >= (int)SrcNumElts)
3299 Idx -= SrcNumElts - PaddedMaskNumElts;
3303 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3305 // If the concatenated vector was padded, extract a subvector with the
3306 // correct number of elements.
3307 if (MaskNumElts != PaddedMaskNumElts)
3308 Result = DAG.getNode(
3309 ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3310 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3312 setValue(&I, Result);
3316 if (SrcNumElts > MaskNumElts) {
3317 // Analyze the access pattern of the vector to see if we can extract
3318 // two subvectors and do the shuffle.
3319 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
3320 bool CanExtract = true;
3321 for (int Idx : Mask) {
3326 if (Idx >= (int)SrcNumElts) {
3331 // If all the indices come from the same MaskNumElts sized portion of
3332 // the sources we can use extract. Also make sure the extract wouldn't
3333 // extract past the end of the source.
3334 int NewStartIdx = alignDown(Idx, MaskNumElts);
3335 if (NewStartIdx + MaskNumElts > SrcNumElts ||
3336 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3338 // Make sure we always update StartIdx as we use it to track if all
3339 // elements are undef.
3340 StartIdx[Input] = NewStartIdx;
3343 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3344 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3348 // Extract appropriate subvector and generate a vector shuffle
3349 for (unsigned Input = 0; Input < 2; ++Input) {
3350 SDValue &Src = Input == 0 ? Src1 : Src2;
3351 if (StartIdx[Input] < 0)
3352 Src = DAG.getUNDEF(VT);
3355 ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3356 DAG.getConstant(StartIdx[Input], DL,
3357 TLI.getVectorIdxTy(DAG.getDataLayout())));
3361 // Calculate new mask.
3362 SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3363 for (int &Idx : MappedOps) {
3364 if (Idx >= (int)SrcNumElts)
3365 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3370 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3375 // We can't use either concat vectors or extract subvectors so fall back to
3376 // replacing the shuffle with extract and build vector.
3377 // to insert and build vector.
3378 EVT EltVT = VT.getVectorElementType();
3379 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3380 SmallVector<SDValue,8> Ops;
3381 for (int Idx : Mask) {
3385 Res = DAG.getUNDEF(EltVT);
3387 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3388 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3390 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3391 EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3397 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3400 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3401 ArrayRef<unsigned> Indices;
3402 if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3403 Indices = IV->getIndices();
3405 Indices = cast<ConstantExpr>(&I)->getIndices();
3407 const Value *Op0 = I.getOperand(0);
3408 const Value *Op1 = I.getOperand(1);
3409 Type *AggTy = I.getType();
3410 Type *ValTy = Op1->getType();
3411 bool IntoUndef = isa<UndefValue>(Op0);
3412 bool FromUndef = isa<UndefValue>(Op1);
3414 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3416 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3417 SmallVector<EVT, 4> AggValueVTs;
3418 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3419 SmallVector<EVT, 4> ValValueVTs;
3420 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3422 unsigned NumAggValues = AggValueVTs.size();
3423 unsigned NumValValues = ValValueVTs.size();
3424 SmallVector<SDValue, 4> Values(NumAggValues);
3426 // Ignore an insertvalue that produces an empty object
3427 if (!NumAggValues) {
3428 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3432 SDValue Agg = getValue(Op0);
3434 // Copy the beginning value(s) from the original aggregate.
3435 for (; i != LinearIndex; ++i)
3436 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3437 SDValue(Agg.getNode(), Agg.getResNo() + i);
3438 // Copy values from the inserted value(s).
3440 SDValue Val = getValue(Op1);
3441 for (; i != LinearIndex + NumValValues; ++i)
3442 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3443 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3445 // Copy remaining value(s) from the original aggregate.
3446 for (; i != NumAggValues; ++i)
3447 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3448 SDValue(Agg.getNode(), Agg.getResNo() + i);
3450 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3451 DAG.getVTList(AggValueVTs), Values));
3454 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3455 ArrayRef<unsigned> Indices;
3456 if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3457 Indices = EV->getIndices();
3459 Indices = cast<ConstantExpr>(&I)->getIndices();
3461 const Value *Op0 = I.getOperand(0);
3462 Type *AggTy = Op0->getType();
3463 Type *ValTy = I.getType();
3464 bool OutOfUndef = isa<UndefValue>(Op0);
3466 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3468 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3469 SmallVector<EVT, 4> ValValueVTs;
3470 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3472 unsigned NumValValues = ValValueVTs.size();
3474 // Ignore a extractvalue that produces an empty object
3475 if (!NumValValues) {
3476 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3480 SmallVector<SDValue, 4> Values(NumValValues);
3482 SDValue Agg = getValue(Op0);
3483 // Copy out the selected value(s).
3484 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3485 Values[i - LinearIndex] =
3487 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3488 SDValue(Agg.getNode(), Agg.getResNo() + i);
3490 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3491 DAG.getVTList(ValValueVTs), Values));
3494 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3495 Value *Op0 = I.getOperand(0);
3496 // Note that the pointer operand may be a vector of pointers. Take the scalar
3497 // element which holds a pointer.
3498 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3499 SDValue N = getValue(Op0);
3500 SDLoc dl = getCurSDLoc();
3502 // Normalize Vector GEP - all scalar operands should be converted to the
3504 unsigned VectorWidth = I.getType()->isVectorTy() ?
3505 cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3507 if (VectorWidth && !N.getValueType().isVector()) {
3508 LLVMContext &Context = *DAG.getContext();
3509 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3510 N = DAG.getSplatBuildVector(VT, dl, N);
3513 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3515 const Value *Idx = GTI.getOperand();
3516 if (StructType *StTy = GTI.getStructTypeOrNull()) {
3517 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3520 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3522 // In an inbounds GEP with an offset that is nonnegative even when
3523 // interpreted as signed, assume there is no unsigned overflow.
3525 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3526 Flags.setNoUnsignedWrap(true);
3528 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3529 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3532 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3533 MVT IdxTy = MVT::getIntegerVT(IdxSize);
3534 APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3536 // If this is a scalar constant or a splat vector of constants,
3537 // handle it quickly.
3538 const auto *CI = dyn_cast<ConstantInt>(Idx);
3539 if (!CI && isa<ConstantDataVector>(Idx) &&
3540 cast<ConstantDataVector>(Idx)->getSplatValue())
3541 CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3546 APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
3547 LLVMContext &Context = *DAG.getContext();
3548 SDValue OffsVal = VectorWidth ?
3549 DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
3550 DAG.getConstant(Offs, dl, IdxTy);
3552 // In an inbouds GEP with an offset that is nonnegative even when
3553 // interpreted as signed, assume there is no unsigned overflow.
3555 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3556 Flags.setNoUnsignedWrap(true);
3558 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3562 // N = N + Idx * ElementSize;
3563 SDValue IdxN = getValue(Idx);
3565 if (!IdxN.getValueType().isVector() && VectorWidth) {
3566 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth);
3567 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3570 // If the index is smaller or larger than intptr_t, truncate or extend
3572 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3574 // If this is a multiply by a power of two, turn it into a shl
3575 // immediately. This is a very common case.
3576 if (ElementSize != 1) {
3577 if (ElementSize.isPowerOf2()) {
3578 unsigned Amt = ElementSize.logBase2();
3579 IdxN = DAG.getNode(ISD::SHL, dl,
3580 N.getValueType(), IdxN,
3581 DAG.getConstant(Amt, dl, IdxN.getValueType()));
3583 SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3584 IdxN = DAG.getNode(ISD::MUL, dl,
3585 N.getValueType(), IdxN, Scale);
3589 N = DAG.getNode(ISD::ADD, dl,
3590 N.getValueType(), N, IdxN);
3597 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3598 // If this is a fixed sized alloca in the entry block of the function,
3599 // allocate it statically on the stack.
3600 if (FuncInfo.StaticAllocaMap.count(&I))
3601 return; // getValue will auto-populate this.
3603 SDLoc dl = getCurSDLoc();
3604 Type *Ty = I.getAllocatedType();
3605 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3606 auto &DL = DAG.getDataLayout();
3607 uint64_t TySize = DL.getTypeAllocSize(Ty);
3609 std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3611 SDValue AllocSize = getValue(I.getArraySize());
3613 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3614 if (AllocSize.getValueType() != IntPtr)
3615 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3617 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3619 DAG.getConstant(TySize, dl, IntPtr));
3621 // Handle alignment. If the requested alignment is less than or equal to
3622 // the stack alignment, ignore it. If the size is greater than or equal to
3623 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3624 unsigned StackAlign =
3625 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3626 if (Align <= StackAlign)
3629 // Round the size of the allocation up to the stack alignment size
3630 // by add SA-1 to the size. This doesn't overflow because we're computing
3631 // an address inside an alloca.
3633 Flags.setNoUnsignedWrap(true);
3634 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3635 DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
3637 // Mask out the low bits for alignment purposes.
3639 DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3640 DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
3642 SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
3643 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3644 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3646 DAG.setRoot(DSA.getValue(1));
3648 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3651 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3653 return visitAtomicLoad(I);
3655 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3656 const Value *SV = I.getOperand(0);
3657 if (TLI.supportSwiftError()) {
3658 // Swifterror values can come from either a function parameter with
3659 // swifterror attribute or an alloca with swifterror attribute.
3660 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3661 if (Arg->hasSwiftErrorAttr())
3662 return visitLoadFromSwiftError(I);
3665 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3666 if (Alloca->isSwiftError())
3667 return visitLoadFromSwiftError(I);
3671 SDValue Ptr = getValue(SV);
3673 Type *Ty = I.getType();
3675 bool isVolatile = I.isVolatile();
3676 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3677 bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3678 bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3679 unsigned Alignment = I.getAlignment();
3682 I.getAAMetadata(AAInfo);
3683 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3685 SmallVector<EVT, 4> ValueVTs;
3686 SmallVector<uint64_t, 4> Offsets;
3687 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3688 unsigned NumValues = ValueVTs.size();
3693 bool ConstantMemory = false;
3694 if (isVolatile || NumValues > MaxParallelChains)
3695 // Serialize volatile loads with other side effects.
3697 else if (AA && AA->pointsToConstantMemory(MemoryLocation(
3698 SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3699 // Do not serialize (non-volatile) loads of constant memory with anything.
3700 Root = DAG.getEntryNode();
3701 ConstantMemory = true;
3703 // Do not serialize non-volatile loads against each other.
3704 Root = DAG.getRoot();
3707 SDLoc dl = getCurSDLoc();
3710 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3712 // An aggregate load cannot wrap around the address space, so offsets to its
3713 // parts don't wrap either.
3715 Flags.setNoUnsignedWrap(true);
3717 SmallVector<SDValue, 4> Values(NumValues);
3718 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3719 EVT PtrVT = Ptr.getValueType();
3720 unsigned ChainI = 0;
3721 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3722 // Serializing loads here may result in excessive register pressure, and
3723 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3724 // could recover a bit by hoisting nodes upward in the chain by recognizing
3725 // they are side-effect free or do not alias. The optimizer should really
3726 // avoid this case by converting large object/array copies to llvm.memcpy
3727 // (MaxParallelChains should always remain as failsafe).
3728 if (ChainI == MaxParallelChains) {
3729 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3730 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3731 makeArrayRef(Chains.data(), ChainI));
3735 SDValue A = DAG.getNode(ISD::ADD, dl,
3737 DAG.getConstant(Offsets[i], dl, PtrVT),
3739 auto MMOFlags = MachineMemOperand::MONone;
3741 MMOFlags |= MachineMemOperand::MOVolatile;
3743 MMOFlags |= MachineMemOperand::MONonTemporal;
3745 MMOFlags |= MachineMemOperand::MOInvariant;
3746 if (isDereferenceable)
3747 MMOFlags |= MachineMemOperand::MODereferenceable;
3748 MMOFlags |= TLI.getMMOFlags(I);
3750 SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3751 MachinePointerInfo(SV, Offsets[i]), Alignment,
3752 MMOFlags, AAInfo, Ranges);
3755 Chains[ChainI] = L.getValue(1);
3758 if (!ConstantMemory) {
3759 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3760 makeArrayRef(Chains.data(), ChainI));
3764 PendingLoads.push_back(Chain);
3767 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3768 DAG.getVTList(ValueVTs), Values));
3771 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3772 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3773 "call visitStoreToSwiftError when backend supports swifterror");
3775 SmallVector<EVT, 4> ValueVTs;
3776 SmallVector<uint64_t, 4> Offsets;
3777 const Value *SrcV = I.getOperand(0);
3778 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3779 SrcV->getType(), ValueVTs, &Offsets);
3780 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3781 "expect a single EVT for swifterror");
3783 SDValue Src = getValue(SrcV);
3784 // Create a virtual register, then update the virtual register.
3785 unsigned VReg; bool CreatedVReg;
3786 std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
3787 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3788 // Chain can be getRoot or getControlRoot.
3789 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3790 SDValue(Src.getNode(), Src.getResNo()));
3791 DAG.setRoot(CopyNode);
3793 FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3796 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3797 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3798 "call visitLoadFromSwiftError when backend supports swifterror");
3800 assert(!I.isVolatile() &&
3801 I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3802 I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3803 "Support volatile, non temporal, invariant for load_from_swift_error");
3805 const Value *SV = I.getOperand(0);
3806 Type *Ty = I.getType();
3808 I.getAAMetadata(AAInfo);
3809 assert((!AA || !AA->pointsToConstantMemory(MemoryLocation(
3810 SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) &&
3811 "load_from_swift_error should not be constant memory");
3813 SmallVector<EVT, 4> ValueVTs;
3814 SmallVector<uint64_t, 4> Offsets;
3815 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3816 ValueVTs, &Offsets);
3817 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3818 "expect a single EVT for swifterror");
3820 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3821 SDValue L = DAG.getCopyFromReg(
3822 getRoot(), getCurSDLoc(),
3823 FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
3829 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3831 return visitAtomicStore(I);
3833 const Value *SrcV = I.getOperand(0);
3834 const Value *PtrV = I.getOperand(1);
3836 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3837 if (TLI.supportSwiftError()) {
3838 // Swifterror values can come from either a function parameter with
3839 // swifterror attribute or an alloca with swifterror attribute.
3840 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3841 if (Arg->hasSwiftErrorAttr())
3842 return visitStoreToSwiftError(I);
3845 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3846 if (Alloca->isSwiftError())
3847 return visitStoreToSwiftError(I);
3851 SmallVector<EVT, 4> ValueVTs;
3852 SmallVector<uint64_t, 4> Offsets;
3853 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3854 SrcV->getType(), ValueVTs, &Offsets);
3855 unsigned NumValues = ValueVTs.size();
3859 // Get the lowered operands. Note that we do this after
3860 // checking if NumResults is zero, because with zero results
3861 // the operands won't have values in the map.
3862 SDValue Src = getValue(SrcV);
3863 SDValue Ptr = getValue(PtrV);
3865 SDValue Root = getRoot();
3866 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3867 SDLoc dl = getCurSDLoc();
3868 EVT PtrVT = Ptr.getValueType();
3869 unsigned Alignment = I.getAlignment();
3871 I.getAAMetadata(AAInfo);
3873 auto MMOFlags = MachineMemOperand::MONone;
3875 MMOFlags |= MachineMemOperand::MOVolatile;
3876 if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3877 MMOFlags |= MachineMemOperand::MONonTemporal;
3878 MMOFlags |= TLI.getMMOFlags(I);
3880 // An aggregate load cannot wrap around the address space, so offsets to its
3881 // parts don't wrap either.
3883 Flags.setNoUnsignedWrap(true);
3885 unsigned ChainI = 0;
3886 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3887 // See visitLoad comments.
3888 if (ChainI == MaxParallelChains) {
3889 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3890 makeArrayRef(Chains.data(), ChainI));
3894 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3895 DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
3896 SDValue St = DAG.getStore(
3897 Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3898 MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3899 Chains[ChainI] = St;
3902 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3903 makeArrayRef(Chains.data(), ChainI));
3904 DAG.setRoot(StoreNode);
3907 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3908 bool IsCompressing) {
3909 SDLoc sdl = getCurSDLoc();
3911 auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3912 unsigned& Alignment) {
3913 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3914 Src0 = I.getArgOperand(0);
3915 Ptr = I.getArgOperand(1);
3916 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3917 Mask = I.getArgOperand(3);
3919 auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3920 unsigned& Alignment) {
3921 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3922 Src0 = I.getArgOperand(0);
3923 Ptr = I.getArgOperand(1);
3924 Mask = I.getArgOperand(2);
3928 Value *PtrOperand, *MaskOperand, *Src0Operand;
3931 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3933 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3935 SDValue Ptr = getValue(PtrOperand);
3936 SDValue Src0 = getValue(Src0Operand);
3937 SDValue Mask = getValue(MaskOperand);
3939 EVT VT = Src0.getValueType();
3941 Alignment = DAG.getEVTAlignment(VT);
3944 I.getAAMetadata(AAInfo);
3946 MachineMemOperand *MMO =
3947 DAG.getMachineFunction().
3948 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3949 MachineMemOperand::MOStore, VT.getStoreSize(),
3951 SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3952 MMO, false /* Truncating */,
3954 DAG.setRoot(StoreNode);
3955 setValue(&I, StoreNode);
3958 // Get a uniform base for the Gather/Scatter intrinsic.
3959 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3960 // We try to represent it as a base pointer + vector of indices.
3961 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
3962 // The first operand of the GEP may be a single pointer or a vector of pointers
3964 // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3966 // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
3967 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3969 // When the first GEP operand is a single pointer - it is the uniform base we
3970 // are looking for. If first operand of the GEP is a splat vector - we
3971 // extract the splat value and use it as a uniform base.
3972 // In all other cases the function returns 'false'.
3973 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3974 SDValue &Scale, SelectionDAGBuilder* SDB) {
3975 SelectionDAG& DAG = SDB->DAG;
3976 LLVMContext &Context = *DAG.getContext();
3978 assert(Ptr->getType()->isVectorTy() && "Uexpected pointer type");
3979 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3983 const Value *GEPPtr = GEP->getPointerOperand();
3984 if (!GEPPtr->getType()->isVectorTy())
3986 else if (!(Ptr = getSplatValue(GEPPtr)))
3989 unsigned FinalIndex = GEP->getNumOperands() - 1;
3990 Value *IndexVal = GEP->getOperand(FinalIndex);
3992 // Ensure all the other indices are 0.
3993 for (unsigned i = 1; i < FinalIndex; ++i) {
3994 auto *C = dyn_cast<ConstantInt>(GEP->getOperand(i));
3995 if (!C || !C->isZero())
3999 // The operands of the GEP may be defined in another basic block.
4000 // In this case we'll not find nodes for the operands.
4001 if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
4004 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4005 const DataLayout &DL = DAG.getDataLayout();
4006 Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()),
4007 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4008 Base = SDB->getValue(Ptr);
4009 Index = SDB->getValue(IndexVal);
4011 if (!Index.getValueType().isVector()) {
4012 unsigned GEPWidth = GEP->getType()->getVectorNumElements();
4013 EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
4014 Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
4019 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4020 SDLoc sdl = getCurSDLoc();
4022 // llvm.masked.scatter.*(Src0, Ptrs, alignemt, Mask)
4023 const Value *Ptr = I.getArgOperand(1);
4024 SDValue Src0 = getValue(I.getArgOperand(0));
4025 SDValue Mask = getValue(I.getArgOperand(3));
4026 EVT VT = Src0.getValueType();
4027 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4029 Alignment = DAG.getEVTAlignment(VT);
4030 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4033 I.getAAMetadata(AAInfo);
4038 const Value *BasePtr = Ptr;
4039 bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4041 const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4042 MachineMemOperand *MMO = DAG.getMachineFunction().
4043 getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4044 MachineMemOperand::MOStore, VT.getStoreSize(),
4047 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4048 Index = getValue(Ptr);
4049 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4051 SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
4052 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4054 DAG.setRoot(Scatter);
4055 setValue(&I, Scatter);
4058 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4059 SDLoc sdl = getCurSDLoc();
4061 auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4062 unsigned& Alignment) {
4063 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4064 Ptr = I.getArgOperand(0);
4065 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4066 Mask = I.getArgOperand(2);
4067 Src0 = I.getArgOperand(3);
4069 auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4070 unsigned& Alignment) {
4071 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4072 Ptr = I.getArgOperand(0);
4074 Mask = I.getArgOperand(1);
4075 Src0 = I.getArgOperand(2);
4078 Value *PtrOperand, *MaskOperand, *Src0Operand;
4081 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4083 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4085 SDValue Ptr = getValue(PtrOperand);
4086 SDValue Src0 = getValue(Src0Operand);
4087 SDValue Mask = getValue(MaskOperand);
4089 EVT VT = Src0.getValueType();
4091 Alignment = DAG.getEVTAlignment(VT);
4094 I.getAAMetadata(AAInfo);
4095 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4097 // Do not serialize masked loads of constant memory with anything.
4098 bool AddToChain = !AA || !AA->pointsToConstantMemory(MemoryLocation(
4099 PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
4100 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4102 MachineMemOperand *MMO =
4103 DAG.getMachineFunction().
4104 getMachineMemOperand(MachinePointerInfo(PtrOperand),
4105 MachineMemOperand::MOLoad, VT.getStoreSize(),
4106 Alignment, AAInfo, Ranges);
4108 SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
4109 ISD::NON_EXTLOAD, IsExpanding);
4111 PendingLoads.push_back(Load.getValue(1));
4115 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4116 SDLoc sdl = getCurSDLoc();
4118 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4119 const Value *Ptr = I.getArgOperand(0);
4120 SDValue Src0 = getValue(I.getArgOperand(3));
4121 SDValue Mask = getValue(I.getArgOperand(2));
4123 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4124 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4125 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4127 Alignment = DAG.getEVTAlignment(VT);
4130 I.getAAMetadata(AAInfo);
4131 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4133 SDValue Root = DAG.getRoot();
4137 const Value *BasePtr = Ptr;
4138 bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4139 bool ConstantMemory = false;
4141 AA && AA->pointsToConstantMemory(MemoryLocation(
4142 BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
4144 // Do not serialize (non-volatile) loads of constant memory with anything.
4145 Root = DAG.getEntryNode();
4146 ConstantMemory = true;
4149 MachineMemOperand *MMO =
4150 DAG.getMachineFunction().
4151 getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4152 MachineMemOperand::MOLoad, VT.getStoreSize(),
4153 Alignment, AAInfo, Ranges);
4156 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4157 Index = getValue(Ptr);
4158 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4160 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4161 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4164 SDValue OutChain = Gather.getValue(1);
4165 if (!ConstantMemory)
4166 PendingLoads.push_back(OutChain);
4167 setValue(&I, Gather);
4170 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4171 SDLoc dl = getCurSDLoc();
4172 AtomicOrdering SuccessOrder = I.getSuccessOrdering();
4173 AtomicOrdering FailureOrder = I.getFailureOrdering();
4174 SyncScope::ID SSID = I.getSyncScopeID();
4176 SDValue InChain = getRoot();
4178 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4179 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4180 SDValue L = DAG.getAtomicCmpSwap(
4181 ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
4182 getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
4183 getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
4184 /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);
4186 SDValue OutChain = L.getValue(2);
4189 DAG.setRoot(OutChain);
4192 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4193 SDLoc dl = getCurSDLoc();
4195 switch (I.getOperation()) {
4196 default: llvm_unreachable("Unknown atomicrmw operation");
4197 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4198 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4199 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4200 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4201 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4202 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4203 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4204 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4205 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4206 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4207 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4209 AtomicOrdering Order = I.getOrdering();
4210 SyncScope::ID SSID = I.getSyncScopeID();
4212 SDValue InChain = getRoot();
4215 DAG.getAtomic(NT, dl,
4216 getValue(I.getValOperand()).getSimpleValueType(),
4218 getValue(I.getPointerOperand()),
4219 getValue(I.getValOperand()),
4220 I.getPointerOperand(),
4221 /* Alignment=*/ 0, Order, SSID);
4223 SDValue OutChain = L.getValue(1);
4226 DAG.setRoot(OutChain);
4229 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4230 SDLoc dl = getCurSDLoc();
4231 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4234 Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
4235 TLI.getFenceOperandTy(DAG.getDataLayout()));
4236 Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
4237 TLI.getFenceOperandTy(DAG.getDataLayout()));
4238 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4241 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4242 SDLoc dl = getCurSDLoc();
4243 AtomicOrdering Order = I.getOrdering();
4244 SyncScope::ID SSID = I.getSyncScopeID();
4246 SDValue InChain = getRoot();
4248 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4249 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4251 if (!TLI.supportsUnalignedAtomics() &&
4252 I.getAlignment() < VT.getStoreSize())
4253 report_fatal_error("Cannot generate unaligned atomic load");
4255 MachineMemOperand *MMO =
4256 DAG.getMachineFunction().
4257 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4258 MachineMemOperand::MOVolatile |
4259 MachineMemOperand::MOLoad,
4261 I.getAlignment() ? I.getAlignment() :
4262 DAG.getEVTAlignment(VT),
4263 AAMDNodes(), nullptr, SSID, Order);
4265 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4267 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4268 getValue(I.getPointerOperand()), MMO);
4270 SDValue OutChain = L.getValue(1);
4273 DAG.setRoot(OutChain);
4276 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4277 SDLoc dl = getCurSDLoc();
4279 AtomicOrdering Order = I.getOrdering();
4280 SyncScope::ID SSID = I.getSyncScopeID();
4282 SDValue InChain = getRoot();
4284 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4286 TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4288 if (I.getAlignment() < VT.getStoreSize())
4289 report_fatal_error("Cannot generate unaligned atomic store");
4292 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4294 getValue(I.getPointerOperand()),
4295 getValue(I.getValueOperand()),
4296 I.getPointerOperand(), I.getAlignment(),
4299 DAG.setRoot(OutChain);
4302 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4304 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4305 unsigned Intrinsic) {
4306 // Ignore the callsite's attributes. A specific call site may be marked with
4307 // readnone, but the lowering code will expect the chain based on the
4309 const Function *F = I.getCalledFunction();
4310 bool HasChain = !F->doesNotAccessMemory();
4311 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4313 // Build the operand list.
4314 SmallVector<SDValue, 8> Ops;
4315 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4317 // We don't need to serialize loads against other loads.
4318 Ops.push_back(DAG.getRoot());
4320 Ops.push_back(getRoot());
4324 // Info is set by getTgtMemInstrinsic
4325 TargetLowering::IntrinsicInfo Info;
4326 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4327 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4328 DAG.getMachineFunction(),
4331 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4332 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4333 Info.opc == ISD::INTRINSIC_W_CHAIN)
4334 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4335 TLI.getPointerTy(DAG.getDataLayout())));
4337 // Add all operands of the call to the operand list.
4338 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4339 SDValue Op = getValue(I.getArgOperand(i));
4343 SmallVector<EVT, 4> ValueVTs;
4344 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4347 ValueVTs.push_back(MVT::Other);
4349 SDVTList VTs = DAG.getVTList(ValueVTs);
4353 if (IsTgtIntrinsic) {
4354 // This is target intrinsic that touches memory
4355 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs,
4357 MachinePointerInfo(Info.ptrVal, Info.offset), Info.align,
4358 Info.flags, Info.size);
4359 } else if (!HasChain) {
4360 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4361 } else if (!I.getType()->isVoidTy()) {
4362 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4364 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4368 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4370 PendingLoads.push_back(Chain);
4375 if (!I.getType()->isVoidTy()) {
4376 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4377 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4378 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4380 Result = lowerRangeToAssertZExt(DAG, I, Result);
4382 setValue(&I, Result);
4386 /// GetSignificand - Get the significand and build it into a floating-point
4387 /// number with exponent of 1:
4389 /// Op = (Op & 0x007fffff) | 0x3f800000;
4391 /// where Op is the hexadecimal representation of floating point value.
4392 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4393 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4394 DAG.getConstant(0x007fffff, dl, MVT::i32));
4395 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4396 DAG.getConstant(0x3f800000, dl, MVT::i32));
4397 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4400 /// GetExponent - Get the exponent:
4402 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4404 /// where Op is the hexadecimal representation of floating point value.
4405 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4406 const TargetLowering &TLI, const SDLoc &dl) {
4407 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4408 DAG.getConstant(0x7f800000, dl, MVT::i32));
4409 SDValue t1 = DAG.getNode(
4410 ISD::SRL, dl, MVT::i32, t0,
4411 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4412 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4413 DAG.getConstant(127, dl, MVT::i32));
4414 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4417 /// getF32Constant - Get 32-bit floating point constant.
4418 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4420 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4424 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4425 SelectionDAG &DAG) {
4426 // TODO: What fast-math-flags should be set on the floating-point nodes?
4428 // IntegerPartOfX = ((int32_t)(t0);
4429 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4431 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4432 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4433 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4435 // IntegerPartOfX <<= 23;
4436 IntegerPartOfX = DAG.getNode(
4437 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4438 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4439 DAG.getDataLayout())));
4441 SDValue TwoToFractionalPartOfX;
4442 if (LimitFloatPrecision <= 6) {
4443 // For floating-point precision of 6:
4445 // TwoToFractionalPartOfX =
4447 // (0.735607626f + 0.252464424f * x) * x;
4449 // error 0.0144103317, which is 6 bits
4450 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4451 getF32Constant(DAG, 0x3e814304, dl));
4452 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4453 getF32Constant(DAG, 0x3f3c50c8, dl));
4454 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4455 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4456 getF32Constant(DAG, 0x3f7f5e7e, dl));
4457 } else if (LimitFloatPrecision <= 12) {
4458 // For floating-point precision of 12:
4460 // TwoToFractionalPartOfX =
4463 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4465 // error 0.000107046256, which is 13 to 14 bits
4466 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4467 getF32Constant(DAG, 0x3da235e3, dl));
4468 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4469 getF32Constant(DAG, 0x3e65b8f3, dl));
4470 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4471 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4472 getF32Constant(DAG, 0x3f324b07, dl));
4473 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4474 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4475 getF32Constant(DAG, 0x3f7ff8fd, dl));
4476 } else { // LimitFloatPrecision <= 18
4477 // For floating-point precision of 18:
4479 // TwoToFractionalPartOfX =
4483 // (0.554906021e-1f +
4484 // (0.961591928e-2f +
4485 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4486 // error 2.47208000*10^(-7), which is better than 18 bits
4487 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4488 getF32Constant(DAG, 0x3924b03e, dl));
4489 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4490 getF32Constant(DAG, 0x3ab24b87, dl));
4491 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4492 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4493 getF32Constant(DAG, 0x3c1d8c17, dl));
4494 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4495 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4496 getF32Constant(DAG, 0x3d634a1d, dl));
4497 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4498 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4499 getF32Constant(DAG, 0x3e75fe14, dl));
4500 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4501 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4502 getF32Constant(DAG, 0x3f317234, dl));
4503 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4504 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4505 getF32Constant(DAG, 0x3f800000, dl));
4508 // Add the exponent into the result in integer domain.
4509 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4510 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4511 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4514 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4515 /// limited-precision mode.
4516 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4517 const TargetLowering &TLI) {
4518 if (Op.getValueType() == MVT::f32 &&
4519 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4521 // Put the exponent in the right bit position for later addition to the
4524 // #define LOG2OFe 1.4426950f
4525 // t0 = Op * LOG2OFe
4527 // TODO: What fast-math-flags should be set here?
4528 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4529 getF32Constant(DAG, 0x3fb8aa3b, dl));
4530 return getLimitedPrecisionExp2(t0, dl, DAG);
4533 // No special expansion.
4534 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4537 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4538 /// limited-precision mode.
4539 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4540 const TargetLowering &TLI) {
4541 // TODO: What fast-math-flags should be set on the floating-point nodes?
4543 if (Op.getValueType() == MVT::f32 &&
4544 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4545 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4547 // Scale the exponent by log(2) [0.69314718f].
4548 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4549 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4550 getF32Constant(DAG, 0x3f317218, dl));
4552 // Get the significand and build it into a floating-point number with
4554 SDValue X = GetSignificand(DAG, Op1, dl);
4556 SDValue LogOfMantissa;
4557 if (LimitFloatPrecision <= 6) {
4558 // For floating-point precision of 6:
4562 // (1.4034025f - 0.23903021f * x) * x;
4564 // error 0.0034276066, which is better than 8 bits
4565 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4566 getF32Constant(DAG, 0xbe74c456, dl));
4567 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4568 getF32Constant(DAG, 0x3fb3a2b1, dl));
4569 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4570 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4571 getF32Constant(DAG, 0x3f949a29, dl));
4572 } else if (LimitFloatPrecision <= 12) {
4573 // For floating-point precision of 12:
4579 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4581 // error 0.000061011436, which is 14 bits
4582 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4583 getF32Constant(DAG, 0xbd67b6d6, dl));
4584 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4585 getF32Constant(DAG, 0x3ee4f4b8, dl));
4586 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4587 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4588 getF32Constant(DAG, 0x3fbc278b, dl));
4589 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4590 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4591 getF32Constant(DAG, 0x40348e95, dl));
4592 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4593 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4594 getF32Constant(DAG, 0x3fdef31a, dl));
4595 } else { // LimitFloatPrecision <= 18
4596 // For floating-point precision of 18:
4604 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4606 // error 0.0000023660568, which is better than 18 bits
4607 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4608 getF32Constant(DAG, 0xbc91e5ac, dl));
4609 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4610 getF32Constant(DAG, 0x3e4350aa, dl));
4611 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4612 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4613 getF32Constant(DAG, 0x3f60d3e3, dl));
4614 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4615 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4616 getF32Constant(DAG, 0x4011cdf0, dl));
4617 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4618 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4619 getF32Constant(DAG, 0x406cfd1c, dl));
4620 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4621 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4622 getF32Constant(DAG, 0x408797cb, dl));
4623 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4624 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4625 getF32Constant(DAG, 0x4006dcab, dl));
4628 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4631 // No special expansion.
4632 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4635 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4636 /// limited-precision mode.
4637 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4638 const TargetLowering &TLI) {
4639 // TODO: What fast-math-flags should be set on the floating-point nodes?
4641 if (Op.getValueType() == MVT::f32 &&
4642 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4643 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4645 // Get the exponent.
4646 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4648 // Get the significand and build it into a floating-point number with
4650 SDValue X = GetSignificand(DAG, Op1, dl);
4652 // Different possible minimax approximations of significand in
4653 // floating-point for various degrees of accuracy over [1,2].
4654 SDValue Log2ofMantissa;
4655 if (LimitFloatPrecision <= 6) {
4656 // For floating-point precision of 6:
4658 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4660 // error 0.0049451742, which is more than 7 bits
4661 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4662 getF32Constant(DAG, 0xbeb08fe0, dl));
4663 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4664 getF32Constant(DAG, 0x40019463, dl));
4665 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4666 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4667 getF32Constant(DAG, 0x3fd6633d, dl));
4668 } else if (LimitFloatPrecision <= 12) {
4669 // For floating-point precision of 12:
4675 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4677 // error 0.0000876136000, which is better than 13 bits
4678 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4679 getF32Constant(DAG, 0xbda7262e, dl));
4680 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4681 getF32Constant(DAG, 0x3f25280b, dl));
4682 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4683 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4684 getF32Constant(DAG, 0x4007b923, dl));
4685 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4686 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4687 getF32Constant(DAG, 0x40823e2f, dl));
4688 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4689 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4690 getF32Constant(DAG, 0x4020d29c, dl));
4691 } else { // LimitFloatPrecision <= 18
4692 // For floating-point precision of 18:
4701 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4703 // error 0.0000018516, which is better than 18 bits
4704 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4705 getF32Constant(DAG, 0xbcd2769e, dl));
4706 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4707 getF32Constant(DAG, 0x3e8ce0b9, dl));
4708 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4709 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4710 getF32Constant(DAG, 0x3fa22ae7, dl));
4711 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4712 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4713 getF32Constant(DAG, 0x40525723, dl));
4714 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4715 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4716 getF32Constant(DAG, 0x40aaf200, dl));
4717 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4718 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4719 getF32Constant(DAG, 0x40c39dad, dl));
4720 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4721 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4722 getF32Constant(DAG, 0x4042902c, dl));
4725 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4728 // No special expansion.
4729 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4732 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4733 /// limited-precision mode.
4734 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4735 const TargetLowering &TLI) {
4736 // TODO: What fast-math-flags should be set on the floating-point nodes?
4738 if (Op.getValueType() == MVT::f32 &&
4739 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4740 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4742 // Scale the exponent by log10(2) [0.30102999f].
4743 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4744 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4745 getF32Constant(DAG, 0x3e9a209a, dl));
4747 // Get the significand and build it into a floating-point number with
4749 SDValue X = GetSignificand(DAG, Op1, dl);
4751 SDValue Log10ofMantissa;
4752 if (LimitFloatPrecision <= 6) {
4753 // For floating-point precision of 6:
4755 // Log10ofMantissa =
4757 // (0.60948995f - 0.10380950f * x) * x;
4759 // error 0.0014886165, which is 6 bits
4760 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4761 getF32Constant(DAG, 0xbdd49a13, dl));
4762 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4763 getF32Constant(DAG, 0x3f1c0789, dl));
4764 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4765 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4766 getF32Constant(DAG, 0x3f011300, dl));
4767 } else if (LimitFloatPrecision <= 12) {
4768 // For floating-point precision of 12:
4770 // Log10ofMantissa =
4773 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4775 // error 0.00019228036, which is better than 12 bits
4776 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4777 getF32Constant(DAG, 0x3d431f31, dl));
4778 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4779 getF32Constant(DAG, 0x3ea21fb2, dl));
4780 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4781 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4782 getF32Constant(DAG, 0x3f6ae232, dl));
4783 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4784 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4785 getF32Constant(DAG, 0x3f25f7c3, dl));
4786 } else { // LimitFloatPrecision <= 18
4787 // For floating-point precision of 18:
4789 // Log10ofMantissa =
4794 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4796 // error 0.0000037995730, which is better than 18 bits
4797 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4798 getF32Constant(DAG, 0x3c5d51ce, dl));
4799 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4800 getF32Constant(DAG, 0x3e00685a, dl));
4801 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4802 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4803 getF32Constant(DAG, 0x3efb6798, dl));
4804 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4805 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4806 getF32Constant(DAG, 0x3f88d192, dl));
4807 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4808 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4809 getF32Constant(DAG, 0x3fc4316c, dl));
4810 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4811 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4812 getF32Constant(DAG, 0x3f57ce70, dl));
4815 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4818 // No special expansion.
4819 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4822 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4823 /// limited-precision mode.
4824 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4825 const TargetLowering &TLI) {
4826 if (Op.getValueType() == MVT::f32 &&
4827 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4828 return getLimitedPrecisionExp2(Op, dl, DAG);
4830 // No special expansion.
4831 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4834 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
4835 /// limited-precision mode with x == 10.0f.
4836 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4837 SelectionDAG &DAG, const TargetLowering &TLI) {
4838 bool IsExp10 = false;
4839 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4840 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4841 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4843 IsExp10 = LHSC->isExactlyValue(Ten);
4847 // TODO: What fast-math-flags should be set on the FMUL node?
4849 // Put the exponent in the right bit position for later addition to the
4852 // #define LOG2OF10 3.3219281f
4853 // t0 = Op * LOG2OF10;
4854 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4855 getF32Constant(DAG, 0x40549a78, dl));
4856 return getLimitedPrecisionExp2(t0, dl, DAG);
4859 // No special expansion.
4860 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4863 /// ExpandPowI - Expand a llvm.powi intrinsic.
4864 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4865 SelectionDAG &DAG) {
4866 // If RHS is a constant, we can expand this out to a multiplication tree,
4867 // otherwise we end up lowering to a call to __powidf2 (for example). When
4868 // optimizing for size, we only want to do this if the expansion would produce
4869 // a small number of multiplies, otherwise we do the full expansion.
4870 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4871 // Get the exponent as a positive value.
4872 unsigned Val = RHSC->getSExtValue();
4873 if ((int)Val < 0) Val = -Val;
4875 // powi(x, 0) -> 1.0
4877 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4879 const Function &F = DAG.getMachineFunction().getFunction();
4880 if (!F.optForSize() ||
4881 // If optimizing for size, don't insert too many multiplies.
4882 // This inserts up to 5 multiplies.
4883 countPopulation(Val) + Log2_32(Val) < 7) {
4884 // We use the simple binary decomposition method to generate the multiply
4885 // sequence. There are more optimal ways to do this (for example,
4886 // powi(x,15) generates one more multiply than it should), but this has
4887 // the benefit of being both really simple and much better than a libcall.
4888 SDValue Res; // Logically starts equal to 1.0
4889 SDValue CurSquare = LHS;
4890 // TODO: Intrinsics should have fast-math-flags that propagate to these
4895 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
4897 Res = CurSquare; // 1.0*CurSquare.
4900 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4901 CurSquare, CurSquare);
4905 // If the original was negative, invert the result, producing 1/(x*x*x).
4906 if (RHSC->getSExtValue() < 0)
4907 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4908 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4913 // Otherwise, expand to a libcall.
4914 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4917 // getUnderlyingArgReg - Find underlying register used for a truncated or
4918 // bitcasted argument.
4919 static unsigned getUnderlyingArgReg(const SDValue &N) {
4920 switch (N.getOpcode()) {
4921 case ISD::CopyFromReg:
4922 return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4924 case ISD::AssertZext:
4925 case ISD::AssertSext:
4927 return getUnderlyingArgReg(N.getOperand(0));
4933 /// If the DbgValueInst is a dbg_value of a function argument, create the
4934 /// corresponding DBG_VALUE machine instruction for it now. At the end of
4935 /// instruction selection, they will be inserted to the entry BB.
4936 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4937 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4938 DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
4939 const Argument *Arg = dyn_cast<Argument>(V);
4943 MachineFunction &MF = DAG.getMachineFunction();
4944 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4946 bool IsIndirect = false;
4947 Optional<MachineOperand> Op;
4948 // Some arguments' frame index is recorded during argument lowering.
4949 int FI = FuncInfo.getArgumentFrameIndex(Arg);
4950 if (FI != std::numeric_limits<int>::max())
4951 Op = MachineOperand::CreateFI(FI);
4953 if (!Op && N.getNode()) {
4954 unsigned Reg = getUnderlyingArgReg(N);
4955 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4956 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4957 unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4962 Op = MachineOperand::CreateReg(Reg, false);
4963 IsIndirect = IsDbgDeclare;
4967 if (!Op && N.getNode())
4968 // Check if frame index is available.
4969 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4970 if (FrameIndexSDNode *FINode =
4971 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4972 Op = MachineOperand::CreateFI(FINode->getIndex());
4975 // Check if ValueMap has reg number.
4976 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4977 if (VMI != FuncInfo.ValueMap.end()) {
4978 const auto &TLI = DAG.getTargetLoweringInfo();
4979 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
4980 V->getType(), getABIRegCopyCC(V));
4981 if (RFV.occupiesMultipleRegs()) {
4982 unsigned Offset = 0;
4983 for (auto RegAndSize : RFV.getRegsAndSizes()) {
4984 Op = MachineOperand::CreateReg(RegAndSize.first, false);
4985 auto FragmentExpr = DIExpression::createFragmentExpression(
4986 Expr, Offset, RegAndSize.second);
4989 FuncInfo.ArgDbgValues.push_back(
4990 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
4991 Op->getReg(), Variable, *FragmentExpr));
4992 Offset += RegAndSize.second;
4996 Op = MachineOperand::CreateReg(VMI->second, false);
4997 IsIndirect = IsDbgDeclare;
5004 assert(Variable->isValidLocationForIntrinsic(DL) &&
5005 "Expected inlined-at fields to agree");
5006 IsIndirect = (Op->isReg()) ? IsIndirect : true;
5007 FuncInfo.ArgDbgValues.push_back(
5008 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
5009 *Op, Variable, Expr));
5014 /// Return the appropriate SDDbgValue based on N.
5015 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5016 DILocalVariable *Variable,
5019 unsigned DbgSDNodeOrder) {
5020 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5021 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5022 // stack slot locations.
5024 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5025 // debug values here after optimization:
5027 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5028 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5030 // Both describe the direct values of their associated variables.
5031 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5032 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5034 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5035 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5038 // VisualStudio defines setjmp as _setjmp
5039 #if defined(_MSC_VER) && defined(setjmp) && \
5040 !defined(setjmp_undefined_for_msvc)
5041 # pragma push_macro("setjmp")
5043 # define setjmp_undefined_for_msvc
5046 /// Lower the call to the specified intrinsic function. If we want to emit this
5047 /// as a call to a named external function, return the name. Otherwise, lower it
5048 /// and return null.
5050 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
5051 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5052 SDLoc sdl = getCurSDLoc();
5053 DebugLoc dl = getCurDebugLoc();
5056 switch (Intrinsic) {
5058 // By default, turn this into a target intrinsic node.
5059 visitTargetIntrinsic(I, Intrinsic);
5061 case Intrinsic::vastart: visitVAStart(I); return nullptr;
5062 case Intrinsic::vaend: visitVAEnd(I); return nullptr;
5063 case Intrinsic::vacopy: visitVACopy(I); return nullptr;
5064 case Intrinsic::returnaddress:
5065 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5066 TLI.getPointerTy(DAG.getDataLayout()),
5067 getValue(I.getArgOperand(0))));
5069 case Intrinsic::addressofreturnaddress:
5070 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5071 TLI.getPointerTy(DAG.getDataLayout())));
5073 case Intrinsic::sponentry:
5074 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5075 TLI.getPointerTy(DAG.getDataLayout())));
5077 case Intrinsic::frameaddress:
5078 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5079 TLI.getPointerTy(DAG.getDataLayout()),
5080 getValue(I.getArgOperand(0))));
5082 case Intrinsic::read_register: {
5083 Value *Reg = I.getArgOperand(0);
5084 SDValue Chain = getRoot();
5086 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5087 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5088 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5089 DAG.getVTList(VT, MVT::Other), Chain, RegName);
5091 DAG.setRoot(Res.getValue(1));
5094 case Intrinsic::write_register: {
5095 Value *Reg = I.getArgOperand(0);
5096 Value *RegValue = I.getArgOperand(1);
5097 SDValue Chain = getRoot();
5099 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5100 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5101 RegName, getValue(RegValue)));
5104 case Intrinsic::setjmp:
5105 return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
5106 case Intrinsic::longjmp:
5107 return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
5108 case Intrinsic::memcpy: {
5109 const auto &MCI = cast<MemCpyInst>(I);
5110 SDValue Op1 = getValue(I.getArgOperand(0));
5111 SDValue Op2 = getValue(I.getArgOperand(1));
5112 SDValue Op3 = getValue(I.getArgOperand(2));
5113 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5114 unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5115 unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5116 unsigned Align = MinAlign(DstAlign, SrcAlign);
5117 bool isVol = MCI.isVolatile();
5118 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5119 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5121 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5123 MachinePointerInfo(I.getArgOperand(0)),
5124 MachinePointerInfo(I.getArgOperand(1)));
5125 updateDAGForMaybeTailCall(MC);
5128 case Intrinsic::memset: {
5129 const auto &MSI = cast<MemSetInst>(I);
5130 SDValue Op1 = getValue(I.getArgOperand(0));
5131 SDValue Op2 = getValue(I.getArgOperand(1));
5132 SDValue Op3 = getValue(I.getArgOperand(2));
5133 // @llvm.memset defines 0 and 1 to both mean no alignment.
5134 unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5135 bool isVol = MSI.isVolatile();
5136 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5137 SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5138 isTC, MachinePointerInfo(I.getArgOperand(0)));
5139 updateDAGForMaybeTailCall(MS);
5142 case Intrinsic::memmove: {
5143 const auto &MMI = cast<MemMoveInst>(I);
5144 SDValue Op1 = getValue(I.getArgOperand(0));
5145 SDValue Op2 = getValue(I.getArgOperand(1));
5146 SDValue Op3 = getValue(I.getArgOperand(2));
5147 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5148 unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5149 unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5150 unsigned Align = MinAlign(DstAlign, SrcAlign);
5151 bool isVol = MMI.isVolatile();
5152 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5153 // FIXME: Support passing different dest/src alignments to the memmove DAG
5155 SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5156 isTC, MachinePointerInfo(I.getArgOperand(0)),
5157 MachinePointerInfo(I.getArgOperand(1)));
5158 updateDAGForMaybeTailCall(MM);
5161 case Intrinsic::memcpy_element_unordered_atomic: {
5162 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5163 SDValue Dst = getValue(MI.getRawDest());
5164 SDValue Src = getValue(MI.getRawSource());
5165 SDValue Length = getValue(MI.getLength());
5167 unsigned DstAlign = MI.getDestAlignment();
5168 unsigned SrcAlign = MI.getSourceAlignment();
5169 Type *LengthTy = MI.getLength()->getType();
5170 unsigned ElemSz = MI.getElementSizeInBytes();
5171 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5172 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5173 SrcAlign, Length, LengthTy, ElemSz, isTC,
5174 MachinePointerInfo(MI.getRawDest()),
5175 MachinePointerInfo(MI.getRawSource()));
5176 updateDAGForMaybeTailCall(MC);
5179 case Intrinsic::memmove_element_unordered_atomic: {
5180 auto &MI = cast<AtomicMemMoveInst>(I);
5181 SDValue Dst = getValue(MI.getRawDest());
5182 SDValue Src = getValue(MI.getRawSource());
5183 SDValue Length = getValue(MI.getLength());
5185 unsigned DstAlign = MI.getDestAlignment();
5186 unsigned SrcAlign = MI.getSourceAlignment();
5187 Type *LengthTy = MI.getLength()->getType();
5188 unsigned ElemSz = MI.getElementSizeInBytes();
5189 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5190 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5191 SrcAlign, Length, LengthTy, ElemSz, isTC,
5192 MachinePointerInfo(MI.getRawDest()),
5193 MachinePointerInfo(MI.getRawSource()));
5194 updateDAGForMaybeTailCall(MC);
5197 case Intrinsic::memset_element_unordered_atomic: {
5198 auto &MI = cast<AtomicMemSetInst>(I);
5199 SDValue Dst = getValue(MI.getRawDest());
5200 SDValue Val = getValue(MI.getValue());
5201 SDValue Length = getValue(MI.getLength());
5203 unsigned DstAlign = MI.getDestAlignment();
5204 Type *LengthTy = MI.getLength()->getType();
5205 unsigned ElemSz = MI.getElementSizeInBytes();
5206 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5207 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5208 LengthTy, ElemSz, isTC,
5209 MachinePointerInfo(MI.getRawDest()));
5210 updateDAGForMaybeTailCall(MC);
5213 case Intrinsic::dbg_addr:
5214 case Intrinsic::dbg_declare: {
5215 const auto &DI = cast<DbgVariableIntrinsic>(I);
5216 DILocalVariable *Variable = DI.getVariable();
5217 DIExpression *Expression = DI.getExpression();
5218 dropDanglingDebugInfo(Variable, Expression);
5219 assert(Variable && "Missing variable");
5221 // Check if address has undef value.
5222 const Value *Address = DI.getVariableLocation();
5223 if (!Address || isa<UndefValue>(Address) ||
5224 (Address->use_empty() && !isa<Argument>(Address))) {
5225 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5229 bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5231 // Check if this variable can be described by a frame index, typically
5232 // either as a static alloca or a byval parameter.
5233 int FI = std::numeric_limits<int>::max();
5234 if (const auto *AI =
5235 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5236 if (AI->isStaticAlloca()) {
5237 auto I = FuncInfo.StaticAllocaMap.find(AI);
5238 if (I != FuncInfo.StaticAllocaMap.end())
5241 } else if (const auto *Arg = dyn_cast<Argument>(
5242 Address->stripInBoundsConstantOffsets())) {
5243 FI = FuncInfo.getArgumentFrameIndex(Arg);
5246 // llvm.dbg.addr is control dependent and always generates indirect
5247 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5248 // the MachineFunction variable table.
5249 if (FI != std::numeric_limits<int>::max()) {
5250 if (Intrinsic == Intrinsic::dbg_addr) {
5251 SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5252 Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5253 DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5258 SDValue &N = NodeMap[Address];
5259 if (!N.getNode() && isa<Argument>(Address))
5260 // Check unused arguments map.
5261 N = UnusedArgNodeMap[Address];
5264 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5265 Address = BCI->getOperand(0);
5266 // Parameters are handled specially.
5267 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5268 if (isParameter && FINode) {
5269 // Byval parameter. We have a frame index at this point.
5271 DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5272 /*IsIndirect*/ true, dl, SDNodeOrder);
5273 } else if (isa<Argument>(Address)) {
5274 // Address is an argument, so try to emit its dbg value using
5275 // virtual register info from the FuncInfo.ValueMap.
5276 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5279 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5280 true, dl, SDNodeOrder);
5282 DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5284 // If Address is an argument then try to emit its dbg value using
5285 // virtual register info from the FuncInfo.ValueMap.
5286 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5288 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5293 case Intrinsic::dbg_label: {
5294 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5295 DILabel *Label = DI.getLabel();
5296 assert(Label && "Missing label");
5299 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5300 DAG.AddDbgLabel(SDV);
5303 case Intrinsic::dbg_value: {
5304 const DbgValueInst &DI = cast<DbgValueInst>(I);
5305 assert(DI.getVariable() && "Missing variable");
5307 DILocalVariable *Variable = DI.getVariable();
5308 DIExpression *Expression = DI.getExpression();
5309 dropDanglingDebugInfo(Variable, Expression);
5310 const Value *V = DI.getValue();
5315 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
5316 isa<ConstantPointerNull>(V)) {
5317 SDV = DAG.getConstantDbgValue(Variable, Expression, V, dl, SDNodeOrder);
5318 DAG.AddDbgValue(SDV, nullptr, false);
5322 // Do not use getValue() in here; we don't want to generate code at
5323 // this point if it hasn't been done yet.
5324 SDValue N = NodeMap[V];
5325 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
5326 N = UnusedArgNodeMap[V];
5328 if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, false, N))
5330 SDV = getDbgValue(N, Variable, Expression, dl, SDNodeOrder);
5331 DAG.AddDbgValue(SDV, N.getNode(), false);
5335 // PHI nodes have already been selected, so we should know which VReg that
5336 // is assigns to already.
5337 if (isa<PHINode>(V)) {
5338 auto VMI = FuncInfo.ValueMap.find(V);
5339 if (VMI != FuncInfo.ValueMap.end()) {
5340 unsigned Reg = VMI->second;
5341 // The PHI node may be split up into several MI PHI nodes (in
5342 // FunctionLoweringInfo::set).
5343 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
5344 V->getType(), None);
5345 if (RFV.occupiesMultipleRegs()) {
5346 unsigned Offset = 0;
5347 unsigned BitsToDescribe = 0;
5348 if (auto VarSize = Variable->getSizeInBits())
5349 BitsToDescribe = *VarSize;
5350 if (auto Fragment = Expression->getFragmentInfo())
5351 BitsToDescribe = Fragment->SizeInBits;
5352 for (auto RegAndSize : RFV.getRegsAndSizes()) {
5353 unsigned RegisterSize = RegAndSize.second;
5354 // Bail out if all bits are described already.
5355 if (Offset >= BitsToDescribe)
5357 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
5358 ? BitsToDescribe - Offset
5360 auto FragmentExpr = DIExpression::createFragmentExpression(
5361 Expression, Offset, FragmentSize);
5364 SDV = DAG.getVRegDbgValue(Variable, *FragmentExpr, RegAndSize.first,
5365 false, dl, SDNodeOrder);
5366 DAG.AddDbgValue(SDV, nullptr, false);
5367 Offset += RegisterSize;
5370 SDV = DAG.getVRegDbgValue(Variable, Expression, Reg, false, dl,
5372 DAG.AddDbgValue(SDV, nullptr, false);
5378 // TODO: When we get here we will either drop the dbg.value completely, or
5379 // we try to move it forward by letting it dangle for awhile. So we should
5380 // probably add an extra DbgValue to the DAG here, with a reference to
5381 // "noreg", to indicate that we have lost the debug location for the
5384 if (!V->use_empty() ) {
5385 // Do not call getValue(V) yet, as we don't want to generate code.
5386 // Remember it for later.
5387 DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5391 LLVM_DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
5392 LLVM_DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
5396 case Intrinsic::eh_typeid_for: {
5397 // Find the type id for the given typeinfo.
5398 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5399 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5400 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5405 case Intrinsic::eh_return_i32:
5406 case Intrinsic::eh_return_i64:
5407 DAG.getMachineFunction().setCallsEHReturn(true);
5408 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5411 getValue(I.getArgOperand(0)),
5412 getValue(I.getArgOperand(1))));
5414 case Intrinsic::eh_unwind_init:
5415 DAG.getMachineFunction().setCallsUnwindInit(true);
5417 case Intrinsic::eh_dwarf_cfa:
5418 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5419 TLI.getPointerTy(DAG.getDataLayout()),
5420 getValue(I.getArgOperand(0))));
5422 case Intrinsic::eh_sjlj_callsite: {
5423 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5424 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5425 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5426 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5428 MMI.setCurrentCallSite(CI->getZExtValue());
5431 case Intrinsic::eh_sjlj_functioncontext: {
5432 // Get and store the index of the function context.
5433 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5435 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5436 int FI = FuncInfo.StaticAllocaMap[FnCtx];
5437 MFI.setFunctionContextIndex(FI);
5440 case Intrinsic::eh_sjlj_setjmp: {
5443 Ops[1] = getValue(I.getArgOperand(0));
5444 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5445 DAG.getVTList(MVT::i32, MVT::Other), Ops);
5446 setValue(&I, Op.getValue(0));
5447 DAG.setRoot(Op.getValue(1));
5450 case Intrinsic::eh_sjlj_longjmp:
5451 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5452 getRoot(), getValue(I.getArgOperand(0))));
5454 case Intrinsic::eh_sjlj_setup_dispatch:
5455 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5458 case Intrinsic::masked_gather:
5459 visitMaskedGather(I);
5461 case Intrinsic::masked_load:
5464 case Intrinsic::masked_scatter:
5465 visitMaskedScatter(I);
5467 case Intrinsic::masked_store:
5468 visitMaskedStore(I);
5470 case Intrinsic::masked_expandload:
5471 visitMaskedLoad(I, true /* IsExpanding */);
5473 case Intrinsic::masked_compressstore:
5474 visitMaskedStore(I, true /* IsCompressing */);
5476 case Intrinsic::x86_mmx_pslli_w:
5477 case Intrinsic::x86_mmx_pslli_d:
5478 case Intrinsic::x86_mmx_pslli_q:
5479 case Intrinsic::x86_mmx_psrli_w:
5480 case Intrinsic::x86_mmx_psrli_d:
5481 case Intrinsic::x86_mmx_psrli_q:
5482 case Intrinsic::x86_mmx_psrai_w:
5483 case Intrinsic::x86_mmx_psrai_d: {
5484 SDValue ShAmt = getValue(I.getArgOperand(1));
5485 if (isa<ConstantSDNode>(ShAmt)) {
5486 visitTargetIntrinsic(I, Intrinsic);
5489 unsigned NewIntrinsic = 0;
5490 EVT ShAmtVT = MVT::v2i32;
5491 switch (Intrinsic) {
5492 case Intrinsic::x86_mmx_pslli_w:
5493 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5495 case Intrinsic::x86_mmx_pslli_d:
5496 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5498 case Intrinsic::x86_mmx_pslli_q:
5499 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5501 case Intrinsic::x86_mmx_psrli_w:
5502 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5504 case Intrinsic::x86_mmx_psrli_d:
5505 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5507 case Intrinsic::x86_mmx_psrli_q:
5508 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5510 case Intrinsic::x86_mmx_psrai_w:
5511 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5513 case Intrinsic::x86_mmx_psrai_d:
5514 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5516 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5519 // The vector shift intrinsics with scalars uses 32b shift amounts but
5520 // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits
5522 // We must do this early because v2i32 is not a legal type.
5525 ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5526 ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5527 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5528 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5529 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5530 DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5531 getValue(I.getArgOperand(0)), ShAmt);
5535 case Intrinsic::powi:
5536 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5537 getValue(I.getArgOperand(1)), DAG));
5539 case Intrinsic::log:
5540 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5542 case Intrinsic::log2:
5543 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5545 case Intrinsic::log10:
5546 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5548 case Intrinsic::exp:
5549 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5551 case Intrinsic::exp2:
5552 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5554 case Intrinsic::pow:
5555 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5556 getValue(I.getArgOperand(1)), DAG, TLI));
5558 case Intrinsic::sqrt:
5559 case Intrinsic::fabs:
5560 case Intrinsic::sin:
5561 case Intrinsic::cos:
5562 case Intrinsic::floor:
5563 case Intrinsic::ceil:
5564 case Intrinsic::trunc:
5565 case Intrinsic::rint:
5566 case Intrinsic::nearbyint:
5567 case Intrinsic::round:
5568 case Intrinsic::canonicalize: {
5570 switch (Intrinsic) {
5571 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5572 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
5573 case Intrinsic::fabs: Opcode = ISD::FABS; break;
5574 case Intrinsic::sin: Opcode = ISD::FSIN; break;
5575 case Intrinsic::cos: Opcode = ISD::FCOS; break;
5576 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
5577 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
5578 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
5579 case Intrinsic::rint: Opcode = ISD::FRINT; break;
5580 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5581 case Intrinsic::round: Opcode = ISD::FROUND; break;
5582 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5585 setValue(&I, DAG.getNode(Opcode, sdl,
5586 getValue(I.getArgOperand(0)).getValueType(),
5587 getValue(I.getArgOperand(0))));
5590 case Intrinsic::minnum: {
5591 auto VT = getValue(I.getArgOperand(0)).getValueType();
5593 I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT)
5596 setValue(&I, DAG.getNode(Opc, sdl, VT,
5597 getValue(I.getArgOperand(0)),
5598 getValue(I.getArgOperand(1))));
5601 case Intrinsic::maxnum: {
5602 auto VT = getValue(I.getArgOperand(0)).getValueType();
5604 I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT)
5607 setValue(&I, DAG.getNode(Opc, sdl, VT,
5608 getValue(I.getArgOperand(0)),
5609 getValue(I.getArgOperand(1))));
5612 case Intrinsic::minimum:
5613 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
5614 getValue(I.getArgOperand(0)).getValueType(),
5615 getValue(I.getArgOperand(0)),
5616 getValue(I.getArgOperand(1))));
5618 case Intrinsic::maximum:
5619 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
5620 getValue(I.getArgOperand(0)).getValueType(),
5621 getValue(I.getArgOperand(0)),
5622 getValue(I.getArgOperand(1))));
5624 case Intrinsic::copysign:
5625 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5626 getValue(I.getArgOperand(0)).getValueType(),
5627 getValue(I.getArgOperand(0)),
5628 getValue(I.getArgOperand(1))));
5630 case Intrinsic::fma:
5631 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5632 getValue(I.getArgOperand(0)).getValueType(),
5633 getValue(I.getArgOperand(0)),
5634 getValue(I.getArgOperand(1)),
5635 getValue(I.getArgOperand(2))));
5637 case Intrinsic::experimental_constrained_fadd:
5638 case Intrinsic::experimental_constrained_fsub:
5639 case Intrinsic::experimental_constrained_fmul:
5640 case Intrinsic::experimental_constrained_fdiv:
5641 case Intrinsic::experimental_constrained_frem:
5642 case Intrinsic::experimental_constrained_fma:
5643 case Intrinsic::experimental_constrained_sqrt:
5644 case Intrinsic::experimental_constrained_pow:
5645 case Intrinsic::experimental_constrained_powi:
5646 case Intrinsic::experimental_constrained_sin:
5647 case Intrinsic::experimental_constrained_cos:
5648 case Intrinsic::experimental_constrained_exp:
5649 case Intrinsic::experimental_constrained_exp2:
5650 case Intrinsic::experimental_constrained_log:
5651 case Intrinsic::experimental_constrained_log10:
5652 case Intrinsic::experimental_constrained_log2:
5653 case Intrinsic::experimental_constrained_rint:
5654 case Intrinsic::experimental_constrained_nearbyint:
5655 case Intrinsic::experimental_constrained_maxnum:
5656 case Intrinsic::experimental_constrained_minnum:
5657 case Intrinsic::experimental_constrained_ceil:
5658 case Intrinsic::experimental_constrained_floor:
5659 case Intrinsic::experimental_constrained_round:
5660 case Intrinsic::experimental_constrained_trunc:
5661 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
5663 case Intrinsic::fmuladd: {
5664 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5665 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5666 TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5667 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5668 getValue(I.getArgOperand(0)).getValueType(),
5669 getValue(I.getArgOperand(0)),
5670 getValue(I.getArgOperand(1)),
5671 getValue(I.getArgOperand(2))));
5673 // TODO: Intrinsic calls should have fast-math-flags.
5674 SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5675 getValue(I.getArgOperand(0)).getValueType(),
5676 getValue(I.getArgOperand(0)),
5677 getValue(I.getArgOperand(1)));
5678 SDValue Add = DAG.getNode(ISD::FADD, sdl,
5679 getValue(I.getArgOperand(0)).getValueType(),
5681 getValue(I.getArgOperand(2)));
5686 case Intrinsic::convert_to_fp16:
5687 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5688 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5689 getValue(I.getArgOperand(0)),
5690 DAG.getTargetConstant(0, sdl,
5693 case Intrinsic::convert_from_fp16:
5694 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5695 TLI.getValueType(DAG.getDataLayout(), I.getType()),
5696 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5697 getValue(I.getArgOperand(0)))));
5699 case Intrinsic::pcmarker: {
5700 SDValue Tmp = getValue(I.getArgOperand(0));
5701 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5704 case Intrinsic::readcyclecounter: {
5705 SDValue Op = getRoot();
5706 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5707 DAG.getVTList(MVT::i64, MVT::Other), Op);
5709 DAG.setRoot(Res.getValue(1));
5712 case Intrinsic::bitreverse:
5713 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5714 getValue(I.getArgOperand(0)).getValueType(),
5715 getValue(I.getArgOperand(0))));
5717 case Intrinsic::bswap:
5718 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5719 getValue(I.getArgOperand(0)).getValueType(),
5720 getValue(I.getArgOperand(0))));
5722 case Intrinsic::cttz: {
5723 SDValue Arg = getValue(I.getArgOperand(0));
5724 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5725 EVT Ty = Arg.getValueType();
5726 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5730 case Intrinsic::ctlz: {
5731 SDValue Arg = getValue(I.getArgOperand(0));
5732 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5733 EVT Ty = Arg.getValueType();
5734 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5738 case Intrinsic::ctpop: {
5739 SDValue Arg = getValue(I.getArgOperand(0));
5740 EVT Ty = Arg.getValueType();
5741 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5744 case Intrinsic::fshl:
5745 case Intrinsic::fshr: {
5746 bool IsFSHL = Intrinsic == Intrinsic::fshl;
5747 SDValue X = getValue(I.getArgOperand(0));
5748 SDValue Y = getValue(I.getArgOperand(1));
5749 SDValue Z = getValue(I.getArgOperand(2));
5750 EVT VT = X.getValueType();
5751 SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
5752 SDValue Zero = DAG.getConstant(0, sdl, VT);
5753 SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
5755 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
5756 if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
5757 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
5761 // When X == Y, this is rotate. If the data type has a power-of-2 size, we
5762 // avoid the select that is necessary in the general case to filter out
5763 // the 0-shift possibility that leads to UB.
5764 if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
5765 // TODO: This should also be done if the operation is custom, but we have
5766 // to make sure targets are handling the modulo shift amount as expected.
5767 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
5768 if (TLI.isOperationLegal(RotateOpcode, VT)) {
5769 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
5773 // Some targets only rotate one way. Try the opposite direction.
5774 RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
5775 if (TLI.isOperationLegal(RotateOpcode, VT)) {
5776 // Negate the shift amount because it is safe to ignore the high bits.
5777 SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5778 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
5782 // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
5783 // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
5784 SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5785 SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
5786 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
5787 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
5788 setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
5792 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
5793 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
5794 SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
5795 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
5796 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
5797 SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
5799 // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
5800 // and that is undefined. We must compare and select to avoid UB.
5803 CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
5805 // For fshl, 0-shift returns the 1st arg (X).
5806 // For fshr, 0-shift returns the 2nd arg (Y).
5807 SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
5808 setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
5811 case Intrinsic::sadd_sat: {
5812 SDValue Op1 = getValue(I.getArgOperand(0));
5813 SDValue Op2 = getValue(I.getArgOperand(1));
5814 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
5817 case Intrinsic::uadd_sat: {
5818 SDValue Op1 = getValue(I.getArgOperand(0));
5819 SDValue Op2 = getValue(I.getArgOperand(1));
5820 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
5823 case Intrinsic::ssub_sat: {
5824 SDValue Op1 = getValue(I.getArgOperand(0));
5825 SDValue Op2 = getValue(I.getArgOperand(1));
5826 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
5829 case Intrinsic::usub_sat: {
5830 SDValue Op1 = getValue(I.getArgOperand(0));
5831 SDValue Op2 = getValue(I.getArgOperand(1));
5832 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
5835 case Intrinsic::smul_fix: {
5836 SDValue Op1 = getValue(I.getArgOperand(0));
5837 SDValue Op2 = getValue(I.getArgOperand(1));
5838 SDValue Op3 = getValue(I.getArgOperand(2));
5840 DAG.getNode(ISD::SMULFIX, sdl, Op1.getValueType(), Op1, Op2, Op3));
5843 case Intrinsic::stacksave: {
5844 SDValue Op = getRoot();
5846 ISD::STACKSAVE, sdl,
5847 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5849 DAG.setRoot(Res.getValue(1));
5852 case Intrinsic::stackrestore:
5853 Res = getValue(I.getArgOperand(0));
5854 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5856 case Intrinsic::get_dynamic_area_offset: {
5857 SDValue Op = getRoot();
5858 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5859 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
5860 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
5863 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5865 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5871 case Intrinsic::stackguard: {
5872 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5873 MachineFunction &MF = DAG.getMachineFunction();
5874 const Module &M = *MF.getFunction().getParent();
5875 SDValue Chain = getRoot();
5876 if (TLI.useLoadStackGuardNode()) {
5877 Res = getLoadStackGuard(DAG, sdl, Chain);
5879 const Value *Global = TLI.getSDagStackGuard(M);
5880 unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5881 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5882 MachinePointerInfo(Global, 0), Align,
5883 MachineMemOperand::MOVolatile);
5885 if (TLI.useStackGuardXorFP())
5886 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
5891 case Intrinsic::stackprotector: {
5892 // Emit code into the DAG to store the stack guard onto the stack.
5893 MachineFunction &MF = DAG.getMachineFunction();
5894 MachineFrameInfo &MFI = MF.getFrameInfo();
5895 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5896 SDValue Src, Chain = getRoot();
5898 if (TLI.useLoadStackGuardNode())
5899 Src = getLoadStackGuard(DAG, sdl, Chain);
5901 Src = getValue(I.getArgOperand(0)); // The guard's value.
5903 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5905 int FI = FuncInfo.StaticAllocaMap[Slot];
5906 MFI.setStackProtectorIndex(FI);
5908 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5910 // Store the stack protector onto the stack.
5911 Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5912 DAG.getMachineFunction(), FI),
5913 /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5918 case Intrinsic::objectsize: {
5919 // If we don't know by now, we're never going to know.
5920 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5922 assert(CI && "Non-constant type in __builtin_object_size?");
5924 SDValue Arg = getValue(I.getCalledValue());
5925 EVT Ty = Arg.getValueType();
5928 Res = DAG.getConstant(-1ULL, sdl, Ty);
5930 Res = DAG.getConstant(0, sdl, Ty);
5936 case Intrinsic::is_constant:
5937 // If this wasn't constant-folded away by now, then it's not a
5939 setValue(&I, DAG.getConstant(0, sdl, MVT::i1));
5942 case Intrinsic::annotation:
5943 case Intrinsic::ptr_annotation:
5944 case Intrinsic::launder_invariant_group:
5945 case Intrinsic::strip_invariant_group:
5946 // Drop the intrinsic, but forward the value
5947 setValue(&I, getValue(I.getOperand(0)));
5949 case Intrinsic::assume:
5950 case Intrinsic::var_annotation:
5951 case Intrinsic::sideeffect:
5952 // Discard annotate attributes, assumptions, and artificial side-effects.
5955 case Intrinsic::codeview_annotation: {
5956 // Emit a label associated with this metadata.
5957 MachineFunction &MF = DAG.getMachineFunction();
5959 MF.getMMI().getContext().createTempSymbol("annotation", true);
5960 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
5961 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
5962 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
5967 case Intrinsic::init_trampoline: {
5968 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5972 Ops[1] = getValue(I.getArgOperand(0));
5973 Ops[2] = getValue(I.getArgOperand(1));
5974 Ops[3] = getValue(I.getArgOperand(2));
5975 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5976 Ops[5] = DAG.getSrcValue(F);
5978 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5983 case Intrinsic::adjust_trampoline:
5984 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5985 TLI.getPointerTy(DAG.getDataLayout()),
5986 getValue(I.getArgOperand(0))));
5988 case Intrinsic::gcroot: {
5989 assert(DAG.getMachineFunction().getFunction().hasGC() &&
5990 "only valid in functions with gc specified, enforced by Verifier");
5991 assert(GFI && "implied by previous");
5992 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5993 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5995 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5996 GFI->addStackRoot(FI->getIndex(), TypeMap);
5999 case Intrinsic::gcread:
6000 case Intrinsic::gcwrite:
6001 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
6002 case Intrinsic::flt_rounds:
6003 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
6006 case Intrinsic::expect:
6007 // Just replace __builtin_expect(exp, c) with EXP.
6008 setValue(&I, getValue(I.getArgOperand(0)));
6011 case Intrinsic::debugtrap:
6012 case Intrinsic::trap: {
6013 StringRef TrapFuncName =
6015 .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
6016 .getValueAsString();
6017 if (TrapFuncName.empty()) {
6018 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
6019 ISD::TRAP : ISD::DEBUGTRAP;
6020 DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot()));
6023 TargetLowering::ArgListTy Args;
6025 TargetLowering::CallLoweringInfo CLI(DAG);
6026 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6027 CallingConv::C, I.getType(),
6028 DAG.getExternalSymbol(TrapFuncName.data(),
6029 TLI.getPointerTy(DAG.getDataLayout())),
6032 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6033 DAG.setRoot(Result.second);
6037 case Intrinsic::uadd_with_overflow:
6038 case Intrinsic::sadd_with_overflow:
6039 case Intrinsic::usub_with_overflow:
6040 case Intrinsic::ssub_with_overflow:
6041 case Intrinsic::umul_with_overflow:
6042 case Intrinsic::smul_with_overflow: {
6044 switch (Intrinsic) {
6045 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6046 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6047 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6048 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6049 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6050 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6051 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6053 SDValue Op1 = getValue(I.getArgOperand(0));
6054 SDValue Op2 = getValue(I.getArgOperand(1));
6056 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
6057 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6060 case Intrinsic::prefetch: {
6062 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6063 auto Flags = rw == 0 ? MachineMemOperand::MOLoad :MachineMemOperand::MOStore;
6064 Ops[0] = DAG.getRoot();
6065 Ops[1] = getValue(I.getArgOperand(0));
6066 Ops[2] = getValue(I.getArgOperand(1));
6067 Ops[3] = getValue(I.getArgOperand(2));
6068 Ops[4] = getValue(I.getArgOperand(3));
6069 SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
6070 DAG.getVTList(MVT::Other), Ops,
6071 EVT::getIntegerVT(*Context, 8),
6072 MachinePointerInfo(I.getArgOperand(0)),
6076 // Chain the prefetch in parallell with any pending loads, to stay out of
6077 // the way of later optimizations.
6078 PendingLoads.push_back(Result);
6080 DAG.setRoot(Result);
6083 case Intrinsic::lifetime_start:
6084 case Intrinsic::lifetime_end: {
6085 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6086 // Stack coloring is not enabled in O0, discard region information.
6087 if (TM.getOptLevel() == CodeGenOpt::None)
6090 SmallVector<Value *, 4> Allocas;
6091 GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
6093 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
6094 E = Allocas.end(); Object != E; ++Object) {
6095 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6097 // Could not find an Alloca.
6098 if (!LifetimeObject)
6101 // First check that the Alloca is static, otherwise it won't have a
6102 // valid frame index.
6103 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6104 if (SI == FuncInfo.StaticAllocaMap.end())
6107 int FI = SI->second;
6112 DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true);
6113 unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
6115 Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
6120 case Intrinsic::invariant_start:
6121 // Discard region information.
6122 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6124 case Intrinsic::invariant_end:
6125 // Discard region information.
6127 case Intrinsic::clear_cache:
6128 return TLI.getClearCacheBuiltinName();
6129 case Intrinsic::donothing:
6132 case Intrinsic::experimental_stackmap:
6135 case Intrinsic::experimental_patchpoint_void:
6136 case Intrinsic::experimental_patchpoint_i64:
6137 visitPatchpoint(&I);
6139 case Intrinsic::experimental_gc_statepoint:
6140 LowerStatepoint(ImmutableStatepoint(&I));
6142 case Intrinsic::experimental_gc_result:
6143 visitGCResult(cast<GCResultInst>(I));
6145 case Intrinsic::experimental_gc_relocate:
6146 visitGCRelocate(cast<GCRelocateInst>(I));
6148 case Intrinsic::instrprof_increment:
6149 llvm_unreachable("instrprof failed to lower an increment");
6150 case Intrinsic::instrprof_value_profile:
6151 llvm_unreachable("instrprof failed to lower a value profiling call");
6152 case Intrinsic::localescape: {
6153 MachineFunction &MF = DAG.getMachineFunction();
6154 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6156 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6157 // is the same on all targets.
6158 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6159 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6160 if (isa<ConstantPointerNull>(Arg))
6161 continue; // Skip null pointers. They represent a hole in index space.
6162 AllocaInst *Slot = cast<AllocaInst>(Arg);
6163 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6164 "can only escape static allocas");
6165 int FI = FuncInfo.StaticAllocaMap[Slot];
6166 MCSymbol *FrameAllocSym =
6167 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6168 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6169 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6170 TII->get(TargetOpcode::LOCAL_ESCAPE))
6171 .addSym(FrameAllocSym)
6178 case Intrinsic::localrecover: {
6179 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6180 MachineFunction &MF = DAG.getMachineFunction();
6181 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
6183 // Get the symbol that defines the frame offset.
6184 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6185 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6187 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6188 MCSymbol *FrameAllocSym =
6189 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6190 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6192 // Create a MCSymbol for the label to avoid any target lowering
6193 // that would make this PC relative.
6194 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6196 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6198 // Add the offset to the FP.
6199 Value *FP = I.getArgOperand(1);
6200 SDValue FPVal = getValue(FP);
6201 SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
6207 case Intrinsic::eh_exceptionpointer:
6208 case Intrinsic::eh_exceptioncode: {
6209 // Get the exception pointer vreg, copy from it, and resize it to fit.
6210 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6211 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6212 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6213 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6215 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6216 if (Intrinsic == Intrinsic::eh_exceptioncode)
6217 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6221 case Intrinsic::xray_customevent: {
6222 // Here we want to make sure that the intrinsic behaves as if it has a
6223 // specific calling convention, and only for x86_64.
6224 // FIXME: Support other platforms later.
6225 const auto &Triple = DAG.getTarget().getTargetTriple();
6226 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6229 SDLoc DL = getCurSDLoc();
6230 SmallVector<SDValue, 8> Ops;
6232 // We want to say that we always want the arguments in registers.
6233 SDValue LogEntryVal = getValue(I.getArgOperand(0));
6234 SDValue StrSizeVal = getValue(I.getArgOperand(1));
6235 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6236 SDValue Chain = getRoot();
6237 Ops.push_back(LogEntryVal);
6238 Ops.push_back(StrSizeVal);
6239 Ops.push_back(Chain);
6241 // We need to enforce the calling convention for the callsite, so that
6242 // argument ordering is enforced correctly, and that register allocation can
6243 // see that some registers may be assumed clobbered and have to preserve
6244 // them across calls to the intrinsic.
6245 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6247 SDValue patchableNode = SDValue(MN, 0);
6248 DAG.setRoot(patchableNode);
6249 setValue(&I, patchableNode);
6252 case Intrinsic::xray_typedevent: {
6253 // Here we want to make sure that the intrinsic behaves as if it has a
6254 // specific calling convention, and only for x86_64.
6255 // FIXME: Support other platforms later.
6256 const auto &Triple = DAG.getTarget().getTargetTriple();
6257 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6260 SDLoc DL = getCurSDLoc();
6261 SmallVector<SDValue, 8> Ops;
6263 // We want to say that we always want the arguments in registers.
6264 // It's unclear to me how manipulating the selection DAG here forces callers
6265 // to provide arguments in registers instead of on the stack.
6266 SDValue LogTypeId = getValue(I.getArgOperand(0));
6267 SDValue LogEntryVal = getValue(I.getArgOperand(1));
6268 SDValue StrSizeVal = getValue(I.getArgOperand(2));
6269 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6270 SDValue Chain = getRoot();
6271 Ops.push_back(LogTypeId);
6272 Ops.push_back(LogEntryVal);
6273 Ops.push_back(StrSizeVal);
6274 Ops.push_back(Chain);
6276 // We need to enforce the calling convention for the callsite, so that
6277 // argument ordering is enforced correctly, and that register allocation can
6278 // see that some registers may be assumed clobbered and have to preserve
6279 // them across calls to the intrinsic.
6280 MachineSDNode *MN = DAG.getMachineNode(
6281 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6282 SDValue patchableNode = SDValue(MN, 0);
6283 DAG.setRoot(patchableNode);
6284 setValue(&I, patchableNode);
6287 case Intrinsic::experimental_deoptimize:
6288 LowerDeoptimizeCall(&I);
6291 case Intrinsic::experimental_vector_reduce_fadd:
6292 case Intrinsic::experimental_vector_reduce_fmul:
6293 case Intrinsic::experimental_vector_reduce_add:
6294 case Intrinsic::experimental_vector_reduce_mul:
6295 case Intrinsic::experimental_vector_reduce_and:
6296 case Intrinsic::experimental_vector_reduce_or:
6297 case Intrinsic::experimental_vector_reduce_xor:
6298 case Intrinsic::experimental_vector_reduce_smax:
6299 case Intrinsic::experimental_vector_reduce_smin:
6300 case Intrinsic::experimental_vector_reduce_umax:
6301 case Intrinsic::experimental_vector_reduce_umin:
6302 case Intrinsic::experimental_vector_reduce_fmax:
6303 case Intrinsic::experimental_vector_reduce_fmin:
6304 visitVectorReduce(I, Intrinsic);
6307 case Intrinsic::icall_branch_funnel: {
6308 SmallVector<SDValue, 16> Ops;
6309 Ops.push_back(DAG.getRoot());
6310 Ops.push_back(getValue(I.getArgOperand(0)));
6313 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6314 I.getArgOperand(1), Offset, DAG.getDataLayout()));
6317 "llvm.icall.branch.funnel operand must be a GlobalValue");
6318 Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6320 struct BranchFunnelTarget {
6324 SmallVector<BranchFunnelTarget, 8> Targets;
6326 for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6327 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6328 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6329 if (ElemBase != Base)
6330 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6331 "to the same GlobalValue");
6333 SDValue Val = getValue(I.getArgOperand(Op + 1));
6334 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6337 "llvm.icall.branch.funnel operand must be a GlobalValue");
6338 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6339 GA->getGlobal(), getCurSDLoc(),
6340 Val.getValueType(), GA->getOffset())});
6343 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6344 return T1.Offset < T2.Offset;
6347 for (auto &T : Targets) {
6348 Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6349 Ops.push_back(T.Target);
6352 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6353 getCurSDLoc(), MVT::Other, Ops),
6361 case Intrinsic::wasm_landingpad_index:
6362 // Information this intrinsic contained has been transferred to
6363 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
6366 case Intrinsic::objc_autorelease:
6367 return "objc_autorelease";
6368 case Intrinsic::objc_autoreleasePoolPop:
6369 return "objc_autoreleasePoolPop";
6370 case Intrinsic::objc_autoreleasePoolPush:
6371 return "objc_autoreleasePoolPush";
6372 case Intrinsic::objc_autoreleaseReturnValue:
6373 return "objc_autoreleaseReturnValue";
6374 case Intrinsic::objc_copyWeak:
6375 return "objc_copyWeak";
6376 case Intrinsic::objc_destroyWeak:
6377 return "objc_destroyWeak";
6378 case Intrinsic::objc_initWeak:
6379 return "objc_initWeak";
6380 case Intrinsic::objc_loadWeak:
6381 return "objc_loadWeak";
6382 case Intrinsic::objc_loadWeakRetained:
6383 return "objc_loadWeakRetained";
6384 case Intrinsic::objc_moveWeak:
6385 return "objc_moveWeak";
6386 case Intrinsic::objc_release:
6387 return "objc_release";
6388 case Intrinsic::objc_retain:
6389 return "objc_retain";
6390 case Intrinsic::objc_retainAutorelease:
6391 return "objc_retainAutorelease";
6392 case Intrinsic::objc_retainAutoreleaseReturnValue:
6393 return "objc_retainAutoreleaseReturnValue";
6394 case Intrinsic::objc_retainAutoreleasedReturnValue:
6395 return "objc_retainAutoreleasedReturnValue";
6396 case Intrinsic::objc_retainBlock:
6397 return "objc_retainBlock";
6398 case Intrinsic::objc_storeStrong:
6399 return "objc_storeStrong";
6400 case Intrinsic::objc_storeWeak:
6401 return "objc_storeWeak";
6402 case Intrinsic::objc_unsafeClaimAutoreleasedReturnValue:
6403 return "objc_unsafeClaimAutoreleasedReturnValue";
6404 case Intrinsic::objc_retainedObject:
6405 return "objc_retainedObject";
6406 case Intrinsic::objc_unretainedObject:
6407 return "objc_unretainedObject";
6408 case Intrinsic::objc_unretainedPointer:
6409 return "objc_unretainedPointer";
6410 case Intrinsic::objc_retain_autorelease:
6411 return "objc_retain_autorelease";
6412 case Intrinsic::objc_sync_enter:
6413 return "objc_sync_enter";
6414 case Intrinsic::objc_sync_exit:
6415 return "objc_sync_exit";
6419 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6420 const ConstrainedFPIntrinsic &FPI) {
6421 SDLoc sdl = getCurSDLoc();
6423 switch (FPI.getIntrinsicID()) {
6424 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6425 case Intrinsic::experimental_constrained_fadd:
6426 Opcode = ISD::STRICT_FADD;
6428 case Intrinsic::experimental_constrained_fsub:
6429 Opcode = ISD::STRICT_FSUB;
6431 case Intrinsic::experimental_constrained_fmul:
6432 Opcode = ISD::STRICT_FMUL;
6434 case Intrinsic::experimental_constrained_fdiv:
6435 Opcode = ISD::STRICT_FDIV;
6437 case Intrinsic::experimental_constrained_frem:
6438 Opcode = ISD::STRICT_FREM;
6440 case Intrinsic::experimental_constrained_fma:
6441 Opcode = ISD::STRICT_FMA;
6443 case Intrinsic::experimental_constrained_sqrt:
6444 Opcode = ISD::STRICT_FSQRT;
6446 case Intrinsic::experimental_constrained_pow:
6447 Opcode = ISD::STRICT_FPOW;
6449 case Intrinsic::experimental_constrained_powi:
6450 Opcode = ISD::STRICT_FPOWI;
6452 case Intrinsic::experimental_constrained_sin:
6453 Opcode = ISD::STRICT_FSIN;
6455 case Intrinsic::experimental_constrained_cos:
6456 Opcode = ISD::STRICT_FCOS;
6458 case Intrinsic::experimental_constrained_exp:
6459 Opcode = ISD::STRICT_FEXP;
6461 case Intrinsic::experimental_constrained_exp2:
6462 Opcode = ISD::STRICT_FEXP2;
6464 case Intrinsic::experimental_constrained_log:
6465 Opcode = ISD::STRICT_FLOG;
6467 case Intrinsic::experimental_constrained_log10:
6468 Opcode = ISD::STRICT_FLOG10;
6470 case Intrinsic::experimental_constrained_log2:
6471 Opcode = ISD::STRICT_FLOG2;
6473 case Intrinsic::experimental_constrained_rint:
6474 Opcode = ISD::STRICT_FRINT;
6476 case Intrinsic::experimental_constrained_nearbyint:
6477 Opcode = ISD::STRICT_FNEARBYINT;
6479 case Intrinsic::experimental_constrained_maxnum:
6480 Opcode = ISD::STRICT_FMAXNUM;
6482 case Intrinsic::experimental_constrained_minnum:
6483 Opcode = ISD::STRICT_FMINNUM;
6485 case Intrinsic::experimental_constrained_ceil:
6486 Opcode = ISD::STRICT_FCEIL;
6488 case Intrinsic::experimental_constrained_floor:
6489 Opcode = ISD::STRICT_FFLOOR;
6491 case Intrinsic::experimental_constrained_round:
6492 Opcode = ISD::STRICT_FROUND;
6494 case Intrinsic::experimental_constrained_trunc:
6495 Opcode = ISD::STRICT_FTRUNC;
6498 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6499 SDValue Chain = getRoot();
6500 SmallVector<EVT, 4> ValueVTs;
6501 ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6502 ValueVTs.push_back(MVT::Other); // Out chain
6504 SDVTList VTs = DAG.getVTList(ValueVTs);
6506 if (FPI.isUnaryOp())
6507 Result = DAG.getNode(Opcode, sdl, VTs,
6508 { Chain, getValue(FPI.getArgOperand(0)) });
6509 else if (FPI.isTernaryOp())
6510 Result = DAG.getNode(Opcode, sdl, VTs,
6511 { Chain, getValue(FPI.getArgOperand(0)),
6512 getValue(FPI.getArgOperand(1)),
6513 getValue(FPI.getArgOperand(2)) });
6515 Result = DAG.getNode(Opcode, sdl, VTs,
6516 { Chain, getValue(FPI.getArgOperand(0)),
6517 getValue(FPI.getArgOperand(1)) });
6519 assert(Result.getNode()->getNumValues() == 2);
6520 SDValue OutChain = Result.getValue(1);
6521 DAG.setRoot(OutChain);
6522 SDValue FPResult = Result.getValue(0);
6523 setValue(&FPI, FPResult);
6526 std::pair<SDValue, SDValue>
6527 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
6528 const BasicBlock *EHPadBB) {
6529 MachineFunction &MF = DAG.getMachineFunction();
6530 MachineModuleInfo &MMI = MF.getMMI();
6531 MCSymbol *BeginLabel = nullptr;
6534 // Insert a label before the invoke call to mark the try range. This can be
6535 // used to detect deletion of the invoke via the MachineModuleInfo.
6536 BeginLabel = MMI.getContext().createTempSymbol();
6538 // For SjLj, keep track of which landing pads go with which invokes
6539 // so as to maintain the ordering of pads in the LSDA.
6540 unsigned CallSiteIndex = MMI.getCurrentCallSite();
6541 if (CallSiteIndex) {
6542 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
6543 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
6545 // Now that the call site is handled, stop tracking it.
6546 MMI.setCurrentCallSite(0);
6549 // Both PendingLoads and PendingExports must be flushed here;
6550 // this call might not return.
6552 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
6554 CLI.setChain(getRoot());
6556 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6557 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6559 assert((CLI.IsTailCall || Result.second.getNode()) &&
6560 "Non-null chain expected with non-tail call!");
6561 assert((Result.second.getNode() || !Result.first.getNode()) &&
6562 "Null value expected with tail call!");
6564 if (!Result.second.getNode()) {
6565 // As a special case, a null chain means that a tail call has been emitted
6566 // and the DAG root is already updated.
6569 // Since there's no actual continuation from this block, nothing can be
6570 // relying on us setting vregs for them.
6571 PendingExports.clear();
6573 DAG.setRoot(Result.second);
6577 // Insert a label at the end of the invoke call to mark the try range. This
6578 // can be used to detect deletion of the invoke via the MachineModuleInfo.
6579 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
6580 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
6582 // Inform MachineModuleInfo of range.
6583 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
6584 // There is a platform (e.g. wasm) that uses funclet style IR but does not
6585 // actually use outlined funclets and their LSDA info style.
6586 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
6588 WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
6589 EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
6590 BeginLabel, EndLabel);
6591 } else if (!isScopedEHPersonality(Pers)) {
6592 MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
6599 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
6601 const BasicBlock *EHPadBB) {
6602 auto &DL = DAG.getDataLayout();
6603 FunctionType *FTy = CS.getFunctionType();
6604 Type *RetTy = CS.getType();
6606 TargetLowering::ArgListTy Args;
6607 Args.reserve(CS.arg_size());
6609 const Value *SwiftErrorVal = nullptr;
6610 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6612 // We can't tail call inside a function with a swifterror argument. Lowering
6613 // does not support this yet. It would have to move into the swifterror
6614 // register before the call.
6615 auto *Caller = CS.getInstruction()->getParent()->getParent();
6616 if (TLI.supportSwiftError() &&
6617 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
6620 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
6622 TargetLowering::ArgListEntry Entry;
6623 const Value *V = *i;
6626 if (V->getType()->isEmptyTy())
6629 SDValue ArgNode = getValue(V);
6630 Entry.Node = ArgNode; Entry.Ty = V->getType();
6632 Entry.setAttributes(&CS, i - CS.arg_begin());
6634 // Use swifterror virtual register as input to the call.
6635 if (Entry.IsSwiftError && TLI.supportSwiftError()) {
6637 // We find the virtual register for the actual swifterror argument.
6638 // Instead of using the Value, we use the virtual register instead.
6639 Entry.Node = DAG.getRegister(FuncInfo
6640 .getOrCreateSwiftErrorVRegUseAt(
6641 CS.getInstruction(), FuncInfo.MBB, V)
6643 EVT(TLI.getPointerTy(DL)));
6646 Args.push_back(Entry);
6648 // If we have an explicit sret argument that is an Instruction, (i.e., it
6649 // might point to function-local memory), we can't meaningfully tail-call.
6650 if (Entry.IsSRet && isa<Instruction>(V))
6654 // Check if target-independent constraints permit a tail call here.
6655 // Target-dependent constraints are checked within TLI->LowerCallTo.
6656 if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
6659 // Disable tail calls if there is an swifterror argument. Targets have not
6660 // been updated to support tail calls.
6661 if (TLI.supportSwiftError() && SwiftErrorVal)
6664 TargetLowering::CallLoweringInfo CLI(DAG);
6665 CLI.setDebugLoc(getCurSDLoc())
6666 .setChain(getRoot())
6667 .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
6668 .setTailCall(isTailCall)
6669 .setConvergent(CS.isConvergent());
6670 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
6672 if (Result.first.getNode()) {
6673 const Instruction *Inst = CS.getInstruction();
6674 Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
6675 setValue(Inst, Result.first);
6678 // The last element of CLI.InVals has the SDValue for swifterror return.
6679 // Here we copy it to a virtual register and update SwiftErrorMap for
6681 if (SwiftErrorVal && TLI.supportSwiftError()) {
6682 // Get the last element of InVals.
6683 SDValue Src = CLI.InVals.back();
6684 unsigned VReg; bool CreatedVReg;
6685 std::tie(VReg, CreatedVReg) =
6686 FuncInfo.getOrCreateSwiftErrorVRegDefAt(CS.getInstruction());
6687 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
6688 // We update the virtual register for the actual swifterror argument.
6690 FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
6691 DAG.setRoot(CopyNode);
6695 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
6696 SelectionDAGBuilder &Builder) {
6697 // Check to see if this load can be trivially constant folded, e.g. if the
6698 // input is from a string literal.
6699 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
6700 // Cast pointer to the type we really want to load.
6702 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
6703 if (LoadVT.isVector())
6704 LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
6706 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
6707 PointerType::getUnqual(LoadTy));
6709 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
6710 const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
6711 return Builder.getValue(LoadCst);
6714 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
6715 // still constant memory, the input chain can be the entry node.
6717 bool ConstantMemory = false;
6719 // Do not serialize (non-volatile) loads of constant memory with anything.
6720 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
6721 Root = Builder.DAG.getEntryNode();
6722 ConstantMemory = true;
6724 // Do not serialize non-volatile loads against each other.
6725 Root = Builder.DAG.getRoot();
6728 SDValue Ptr = Builder.getValue(PtrVal);
6729 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
6730 Ptr, MachinePointerInfo(PtrVal),
6731 /* Alignment = */ 1);
6733 if (!ConstantMemory)
6734 Builder.PendingLoads.push_back(LoadVal.getValue(1));
6738 /// Record the value for an instruction that produces an integer result,
6739 /// converting the type where necessary.
6740 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6743 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6746 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6748 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6749 setValue(&I, Value);
6752 /// See if we can lower a memcmp call into an optimized form. If so, return
6753 /// true and lower it. Otherwise return false, and it will be lowered like a
6755 /// The caller already checked that \p I calls the appropriate LibFunc with a
6756 /// correct prototype.
6757 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6758 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6759 const Value *Size = I.getArgOperand(2);
6760 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
6761 if (CSize && CSize->getZExtValue() == 0) {
6762 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6764 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
6768 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6769 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
6770 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
6771 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
6772 if (Res.first.getNode()) {
6773 processIntegerCallValue(I, Res.first, true);
6774 PendingLoads.push_back(Res.second);
6778 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
6779 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
6780 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
6783 // If the target has a fast compare for the given size, it will return a
6784 // preferred load type for that size. Require that the load VT is legal and
6785 // that the target supports unaligned loads of that type. Otherwise, return
6787 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
6788 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6789 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
6790 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
6791 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
6792 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
6793 // TODO: Check alignment of src and dest ptrs.
6794 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
6795 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6796 if (!TLI.isTypeLegal(LVT) ||
6797 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
6798 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
6799 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
6805 // This turns into unaligned loads. We only do this if the target natively
6806 // supports the MVT we'll be loading or if it is small enough (<= 4) that
6807 // we'll only produce a small number of byte loads.
6809 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
6810 switch (NumBitsToCompare) {
6822 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6826 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
6829 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
6830 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
6832 // Bitcast to a wide integer type if the loads are vectors.
6833 if (LoadVT.isVector()) {
6834 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
6835 LoadL = DAG.getBitcast(CmpVT, LoadL);
6836 LoadR = DAG.getBitcast(CmpVT, LoadR);
6839 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
6840 processIntegerCallValue(I, Cmp, false);
6844 /// See if we can lower a memchr call into an optimized form. If so, return
6845 /// true and lower it. Otherwise return false, and it will be lowered like a
6847 /// The caller already checked that \p I calls the appropriate LibFunc with a
6848 /// correct prototype.
6849 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
6850 const Value *Src = I.getArgOperand(0);
6851 const Value *Char = I.getArgOperand(1);
6852 const Value *Length = I.getArgOperand(2);
6854 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6855 std::pair<SDValue, SDValue> Res =
6856 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
6857 getValue(Src), getValue(Char), getValue(Length),
6858 MachinePointerInfo(Src));
6859 if (Res.first.getNode()) {
6860 setValue(&I, Res.first);
6861 PendingLoads.push_back(Res.second);
6868 /// See if we can lower a mempcpy call into an optimized form. If so, return
6869 /// true and lower it. Otherwise return false, and it will be lowered like a
6871 /// The caller already checked that \p I calls the appropriate LibFunc with a
6872 /// correct prototype.
6873 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
6874 SDValue Dst = getValue(I.getArgOperand(0));
6875 SDValue Src = getValue(I.getArgOperand(1));
6876 SDValue Size = getValue(I.getArgOperand(2));
6878 unsigned DstAlign = DAG.InferPtrAlignment(Dst);
6879 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6880 unsigned Align = std::min(DstAlign, SrcAlign);
6881 if (Align == 0) // Alignment of one or both could not be inferred.
6882 Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
6885 SDLoc sdl = getCurSDLoc();
6887 // In the mempcpy context we need to pass in a false value for isTailCall
6888 // because the return pointer needs to be adjusted by the size of
6889 // the copied memory.
6890 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
6891 false, /*isTailCall=*/false,
6892 MachinePointerInfo(I.getArgOperand(0)),
6893 MachinePointerInfo(I.getArgOperand(1)));
6894 assert(MC.getNode() != nullptr &&
6895 "** memcpy should not be lowered as TailCall in mempcpy context **");
6898 // Check if Size needs to be truncated or extended.
6899 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
6901 // Adjust return pointer to point just past the last dst byte.
6902 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
6904 setValue(&I, DstPlusSize);
6908 /// See if we can lower a strcpy call into an optimized form. If so, return
6909 /// true and lower it, otherwise return false and it will be lowered like a
6911 /// The caller already checked that \p I calls the appropriate LibFunc with a
6912 /// correct prototype.
6913 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
6914 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6916 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6917 std::pair<SDValue, SDValue> Res =
6918 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
6919 getValue(Arg0), getValue(Arg1),
6920 MachinePointerInfo(Arg0),
6921 MachinePointerInfo(Arg1), isStpcpy);
6922 if (Res.first.getNode()) {
6923 setValue(&I, Res.first);
6924 DAG.setRoot(Res.second);
6931 /// See if we can lower a strcmp call into an optimized form. If so, return
6932 /// true and lower it, otherwise return false and it will be lowered like a
6934 /// The caller already checked that \p I calls the appropriate LibFunc with a
6935 /// correct prototype.
6936 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
6937 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6939 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6940 std::pair<SDValue, SDValue> Res =
6941 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
6942 getValue(Arg0), getValue(Arg1),
6943 MachinePointerInfo(Arg0),
6944 MachinePointerInfo(Arg1));
6945 if (Res.first.getNode()) {
6946 processIntegerCallValue(I, Res.first, true);
6947 PendingLoads.push_back(Res.second);
6954 /// See if we can lower a strlen call into an optimized form. If so, return
6955 /// true and lower it, otherwise return false and it will be lowered like a
6957 /// The caller already checked that \p I calls the appropriate LibFunc with a
6958 /// correct prototype.
6959 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6960 const Value *Arg0 = I.getArgOperand(0);
6962 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6963 std::pair<SDValue, SDValue> Res =
6964 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6965 getValue(Arg0), MachinePointerInfo(Arg0));
6966 if (Res.first.getNode()) {
6967 processIntegerCallValue(I, Res.first, false);
6968 PendingLoads.push_back(Res.second);
6975 /// See if we can lower a strnlen call into an optimized form. If so, return
6976 /// true and lower it, otherwise return false and it will be lowered like a
6978 /// The caller already checked that \p I calls the appropriate LibFunc with a
6979 /// correct prototype.
6980 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6981 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6983 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6984 std::pair<SDValue, SDValue> Res =
6985 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6986 getValue(Arg0), getValue(Arg1),
6987 MachinePointerInfo(Arg0));
6988 if (Res.first.getNode()) {
6989 processIntegerCallValue(I, Res.first, false);
6990 PendingLoads.push_back(Res.second);
6997 /// See if we can lower a unary floating-point operation into an SDNode with
6998 /// the specified Opcode. If so, return true and lower it, otherwise return
6999 /// false and it will be lowered like a normal call.
7000 /// The caller already checked that \p I calls the appropriate LibFunc with a
7001 /// correct prototype.
7002 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
7004 // We already checked this call's prototype; verify it doesn't modify errno.
7005 if (!I.onlyReadsMemory())
7008 SDValue Tmp = getValue(I.getArgOperand(0));
7009 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
7013 /// See if we can lower a binary floating-point operation into an SDNode with
7014 /// the specified Opcode. If so, return true and lower it. Otherwise return
7015 /// false, and it will be lowered like a normal call.
7016 /// The caller already checked that \p I calls the appropriate LibFunc with a
7017 /// correct prototype.
7018 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
7020 // We already checked this call's prototype; verify it doesn't modify errno.
7021 if (!I.onlyReadsMemory())
7024 SDValue Tmp0 = getValue(I.getArgOperand(0));
7025 SDValue Tmp1 = getValue(I.getArgOperand(1));
7026 EVT VT = Tmp0.getValueType();
7027 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
7031 void SelectionDAGBuilder::visitCall(const CallInst &I) {
7032 // Handle inline assembly differently.
7033 if (isa<InlineAsm>(I.getCalledValue())) {
7038 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
7039 computeUsesVAFloatArgument(I, MMI);
7041 const char *RenameFn = nullptr;
7042 if (Function *F = I.getCalledFunction()) {
7043 if (F->isDeclaration()) {
7044 // Is this an LLVM intrinsic or a target-specific intrinsic?
7045 unsigned IID = F->getIntrinsicID();
7047 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
7048 IID = II->getIntrinsicID(F);
7051 RenameFn = visitIntrinsicCall(I, IID);
7057 // Check for well-known libc/libm calls. If the function is internal, it
7058 // can't be a library call. Don't do the check if marked as nobuiltin for
7059 // some reason or the call site requires strict floating point semantics.
7061 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
7062 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
7063 LibInfo->hasOptimizedCodeGen(Func)) {
7066 case LibFunc_copysign:
7067 case LibFunc_copysignf:
7068 case LibFunc_copysignl:
7069 // We already checked this call's prototype; verify it doesn't modify
7071 if (I.onlyReadsMemory()) {
7072 SDValue LHS = getValue(I.getArgOperand(0));
7073 SDValue RHS = getValue(I.getArgOperand(1));
7074 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
7075 LHS.getValueType(), LHS, RHS));
7082 if (visitUnaryFloatCall(I, ISD::FABS))
7088 if (visitBinaryFloatCall(I, ISD::FMINNUM))
7094 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
7100 if (visitUnaryFloatCall(I, ISD::FSIN))
7106 if (visitUnaryFloatCall(I, ISD::FCOS))
7112 case LibFunc_sqrt_finite:
7113 case LibFunc_sqrtf_finite:
7114 case LibFunc_sqrtl_finite:
7115 if (visitUnaryFloatCall(I, ISD::FSQRT))
7119 case LibFunc_floorf:
7120 case LibFunc_floorl:
7121 if (visitUnaryFloatCall(I, ISD::FFLOOR))
7124 case LibFunc_nearbyint:
7125 case LibFunc_nearbyintf:
7126 case LibFunc_nearbyintl:
7127 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
7133 if (visitUnaryFloatCall(I, ISD::FCEIL))
7139 if (visitUnaryFloatCall(I, ISD::FRINT))
7143 case LibFunc_roundf:
7144 case LibFunc_roundl:
7145 if (visitUnaryFloatCall(I, ISD::FROUND))
7149 case LibFunc_truncf:
7150 case LibFunc_truncl:
7151 if (visitUnaryFloatCall(I, ISD::FTRUNC))
7157 if (visitUnaryFloatCall(I, ISD::FLOG2))
7163 if (visitUnaryFloatCall(I, ISD::FEXP2))
7166 case LibFunc_memcmp:
7167 if (visitMemCmpCall(I))
7170 case LibFunc_mempcpy:
7171 if (visitMemPCpyCall(I))
7174 case LibFunc_memchr:
7175 if (visitMemChrCall(I))
7178 case LibFunc_strcpy:
7179 if (visitStrCpyCall(I, false))
7182 case LibFunc_stpcpy:
7183 if (visitStrCpyCall(I, true))
7186 case LibFunc_strcmp:
7187 if (visitStrCmpCall(I))
7190 case LibFunc_strlen:
7191 if (visitStrLenCall(I))
7194 case LibFunc_strnlen:
7195 if (visitStrNLenCall(I))
7204 Callee = getValue(I.getCalledValue());
7206 Callee = DAG.getExternalSymbol(
7208 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
7210 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7211 // have to do anything here to lower funclet bundles.
7212 assert(!I.hasOperandBundlesOtherThan(
7213 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
7214 "Cannot lower calls with arbitrary operand bundles!");
7216 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7217 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7219 // Check if we can potentially perform a tail call. More detailed checking
7220 // is be done within LowerCallTo, after more information about the call is
7222 LowerCallTo(&I, Callee, I.isTailCall());
7227 /// AsmOperandInfo - This contains information for each constraint that we are
7229 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7231 /// CallOperand - If this is the result output operand or a clobber
7232 /// this is null, otherwise it is the incoming operand to the CallInst.
7233 /// This gets modified as the asm is processed.
7234 SDValue CallOperand;
7236 /// AssignedRegs - If this is a register or register class operand, this
7237 /// contains the set of register corresponding to the operand.
7238 RegsForValue AssignedRegs;
7240 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7241 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7244 /// Whether or not this operand accesses memory
7245 bool hasMemory(const TargetLowering &TLI) const {
7246 // Indirect operand accesses access memory.
7250 for (const auto &Code : Codes)
7251 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7257 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7258 /// corresponds to. If there is no Value* for this operand, it returns
7260 EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7261 const DataLayout &DL) const {
7262 if (!CallOperandVal) return MVT::Other;
7264 if (isa<BasicBlock>(CallOperandVal))
7265 return TLI.getPointerTy(DL);
7267 llvm::Type *OpTy = CallOperandVal->getType();
7269 // FIXME: code duplicated from TargetLowering::ParseConstraints().
7270 // If this is an indirect operand, the operand is a pointer to the
7273 PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7275 report_fatal_error("Indirect operand for inline asm not a pointer!");
7276 OpTy = PtrTy->getElementType();
7279 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
7280 if (StructType *STy = dyn_cast<StructType>(OpTy))
7281 if (STy->getNumElements() == 1)
7282 OpTy = STy->getElementType(0);
7284 // If OpTy is not a single value, it may be a struct/union that we
7285 // can tile with integers.
7286 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7287 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7296 OpTy = IntegerType::get(Context, BitSize);
7301 return TLI.getValueType(DL, OpTy, true);
7305 using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;
7307 } // end anonymous namespace
7309 /// Make sure that the output operand \p OpInfo and its corresponding input
7310 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7312 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7313 SDISelAsmOperandInfo &MatchingOpInfo,
7314 SelectionDAG &DAG) {
7315 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7318 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7319 const auto &TLI = DAG.getTargetLoweringInfo();
7321 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7322 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7323 OpInfo.ConstraintVT);
7324 std::pair<unsigned, const TargetRegisterClass *> InputRC =
7325 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7326 MatchingOpInfo.ConstraintVT);
7327 if ((OpInfo.ConstraintVT.isInteger() !=
7328 MatchingOpInfo.ConstraintVT.isInteger()) ||
7329 (MatchRC.second != InputRC.second)) {
7330 // FIXME: error out in a more elegant fashion
7331 report_fatal_error("Unsupported asm: input constraint"
7332 " with a matching output constraint of"
7333 " incompatible type!");
7335 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7338 /// Get a direct memory input to behave well as an indirect operand.
7339 /// This may introduce stores, hence the need for a \p Chain.
7340 /// \return The (possibly updated) chain.
7341 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7342 SDISelAsmOperandInfo &OpInfo,
7343 SelectionDAG &DAG) {
7344 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7346 // If we don't have an indirect input, put it in the constpool if we can,
7347 // otherwise spill it to a stack slot.
7348 // TODO: This isn't quite right. We need to handle these according to
7349 // the addressing mode that the constraint wants. Also, this may take
7350 // an additional register for the computation and we don't want that
7353 // If the operand is a float, integer, or vector constant, spill to a
7354 // constant pool entry to get its address.
7355 const Value *OpVal = OpInfo.CallOperandVal;
7356 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7357 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7358 OpInfo.CallOperand = DAG.getConstantPool(
7359 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7363 // Otherwise, create a stack slot and emit a store to it before the asm.
7364 Type *Ty = OpVal->getType();
7365 auto &DL = DAG.getDataLayout();
7366 uint64_t TySize = DL.getTypeAllocSize(Ty);
7367 unsigned Align = DL.getPrefTypeAlignment(Ty);
7368 MachineFunction &MF = DAG.getMachineFunction();
7369 int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7370 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7371 Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7372 MachinePointerInfo::getFixedStack(MF, SSFI));
7373 OpInfo.CallOperand = StackSlot;
7378 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7379 /// specified operand. We prefer to assign virtual registers, to allow the
7380 /// register allocator to handle the assignment process. However, if the asm
7381 /// uses features that we can't model on machineinstrs, we have SDISel do the
7382 /// allocation. This produces generally horrible, but correct, code.
7384 /// OpInfo describes the operand
7385 /// RefOpInfo describes the matching operand if any, the operand otherwise
7386 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
7387 const SDLoc &DL, SDISelAsmOperandInfo &OpInfo,
7388 SDISelAsmOperandInfo &RefOpInfo) {
7389 LLVMContext &Context = *DAG.getContext();
7391 MachineFunction &MF = DAG.getMachineFunction();
7392 SmallVector<unsigned, 4> Regs;
7393 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7395 // If this is a constraint for a single physreg, or a constraint for a
7396 // register class, find it.
7397 std::pair<unsigned, const TargetRegisterClass *> PhysReg =
7398 TLI.getRegForInlineAsmConstraint(&TRI, RefOpInfo.ConstraintCode,
7399 RefOpInfo.ConstraintVT);
7401 unsigned NumRegs = 1;
7402 if (OpInfo.ConstraintVT != MVT::Other) {
7403 // If this is an FP operand in an integer register (or visa versa), or more
7404 // generally if the operand value disagrees with the register class we plan
7405 // to stick it in, fix the operand type.
7407 // If this is an input value, the bitcast to the new type is done now.
7408 // Bitcast for output value is done at the end of visitInlineAsm().
7409 if ((OpInfo.Type == InlineAsm::isOutput ||
7410 OpInfo.Type == InlineAsm::isInput) &&
7412 !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
7413 // Try to convert to the first EVT that the reg class contains. If the
7414 // types are identical size, use a bitcast to convert (e.g. two differing
7415 // vector types). Note: output bitcast is done at the end of
7416 // visitInlineAsm().
7417 MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
7418 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7419 // Exclude indirect inputs while they are unsupported because the code
7420 // to perform the load is missing and thus OpInfo.CallOperand still
7421 // refers to the input address rather than the pointed-to value.
7422 if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7423 OpInfo.CallOperand =
7424 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7425 OpInfo.ConstraintVT = RegVT;
7426 // If the operand is an FP value and we want it in integer registers,
7427 // use the corresponding integer type. This turns an f64 value into
7428 // i64, which can be passed with two i32 values on a 32-bit machine.
7429 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7430 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7431 if (OpInfo.Type == InlineAsm::isInput)
7432 OpInfo.CallOperand =
7433 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7434 OpInfo.ConstraintVT = RegVT;
7438 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7441 // No need to allocate a matching input constraint since the constraint it's
7442 // matching to has already been allocated.
7443 if (OpInfo.isMatchingInputConstraint())
7447 EVT ValueVT = OpInfo.ConstraintVT;
7449 // If this is a constraint for a specific physical register, like {r17},
7451 if (unsigned AssignedReg = PhysReg.first) {
7452 const TargetRegisterClass *RC = PhysReg.second;
7453 if (OpInfo.ConstraintVT == MVT::Other)
7454 ValueVT = *TRI.legalclasstypes_begin(*RC);
7456 // Get the actual register value type. This is important, because the user
7457 // may have asked for (e.g.) the AX register in i32 type. We need to
7458 // remember that AX is actually i16 to get the right extension.
7459 RegVT = *TRI.legalclasstypes_begin(*RC);
7461 // This is an explicit reference to a physical register.
7462 Regs.push_back(AssignedReg);
7464 // If this is an expanded reference, add the rest of the regs to Regs.
7466 TargetRegisterClass::iterator I = RC->begin();
7467 for (; *I != AssignedReg; ++I)
7468 assert(I != RC->end() && "Didn't find reg!");
7470 // Already added the first reg.
7472 for (; NumRegs; --NumRegs, ++I) {
7473 assert(I != RC->end() && "Ran out of registers to allocate!");
7478 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7482 // Otherwise, if this was a reference to an LLVM register class, create vregs
7483 // for this reference.
7484 if (const TargetRegisterClass *RC = PhysReg.second) {
7485 RegVT = *TRI.legalclasstypes_begin(*RC);
7486 if (OpInfo.ConstraintVT == MVT::Other)
7489 // Create the appropriate number of virtual registers.
7490 MachineRegisterInfo &RegInfo = MF.getRegInfo();
7491 for (; NumRegs; --NumRegs)
7492 Regs.push_back(RegInfo.createVirtualRegister(RC));
7494 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7498 // Otherwise, we couldn't allocate enough registers for this.
7502 findMatchingInlineAsmOperand(unsigned OperandNo,
7503 const std::vector<SDValue> &AsmNodeOperands) {
7504 // Scan until we find the definition we already emitted of this operand.
7505 unsigned CurOp = InlineAsm::Op_FirstOperand;
7506 for (; OperandNo; --OperandNo) {
7507 // Advance to the next operand.
7509 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7510 assert((InlineAsm::isRegDefKind(OpFlag) ||
7511 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
7512 InlineAsm::isMemKind(OpFlag)) &&
7513 "Skipped past definitions?");
7514 CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
7519 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT
7520 /// \return true if it has succeeded, false otherwise
7521 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
7522 MVT RegVT, SelectionDAG &DAG) {
7523 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7524 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
7525 for (unsigned i = 0, e = NumRegs; i != e; ++i) {
7526 if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
7527 Regs.push_back(RegInfo.createVirtualRegister(RC));
7540 explicit ExtraFlags(ImmutableCallSite CS) {
7541 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7542 if (IA->hasSideEffects())
7543 Flags |= InlineAsm::Extra_HasSideEffects;
7544 if (IA->isAlignStack())
7545 Flags |= InlineAsm::Extra_IsAlignStack;
7546 if (CS.isConvergent())
7547 Flags |= InlineAsm::Extra_IsConvergent;
7548 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
7551 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
7552 // Ideally, we would only check against memory constraints. However, the
7553 // meaning of an Other constraint can be target-specific and we can't easily
7554 // reason about it. Therefore, be conservative and set MayLoad/MayStore
7555 // for Other constraints as well.
7556 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
7557 OpInfo.ConstraintType == TargetLowering::C_Other) {
7558 if (OpInfo.Type == InlineAsm::isInput)
7559 Flags |= InlineAsm::Extra_MayLoad;
7560 else if (OpInfo.Type == InlineAsm::isOutput)
7561 Flags |= InlineAsm::Extra_MayStore;
7562 else if (OpInfo.Type == InlineAsm::isClobber)
7563 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
7567 unsigned get() const { return Flags; }
7570 } // end anonymous namespace
7572 /// visitInlineAsm - Handle a call to an InlineAsm object.
7573 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
7574 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7576 /// ConstraintOperands - Information about all of the constraints.
7577 SDISelAsmOperandInfoVector ConstraintOperands;
7579 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7580 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
7581 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
7583 bool hasMemory = false;
7585 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
7586 ExtraFlags ExtraInfo(CS);
7588 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
7589 unsigned ResNo = 0; // ResNo - The result number of the next output.
7590 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
7591 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
7592 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
7594 MVT OpVT = MVT::Other;
7596 // Compute the value type for each operand.
7597 if (OpInfo.Type == InlineAsm::isInput ||
7598 (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
7599 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
7601 // Process the call argument. BasicBlocks are labels, currently appearing
7603 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
7604 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
7606 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
7611 .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
7615 if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
7616 // The return value of the call is this value. As such, there is no
7617 // corresponding argument.
7618 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7619 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
7620 OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
7621 STy->getElementType(ResNo));
7623 assert(ResNo == 0 && "Asm only has one result!");
7624 OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
7629 OpInfo.ConstraintVT = OpVT;
7632 hasMemory = OpInfo.hasMemory(TLI);
7634 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
7635 // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
7636 auto TargetConstraint = TargetConstraints[i];
7638 // Compute the constraint code and ConstraintType to use.
7639 TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
7641 ExtraInfo.update(TargetConstraint);
7644 SDValue Chain, Flag;
7646 // We won't need to flush pending loads if this asm doesn't touch
7647 // memory and is nonvolatile.
7648 if (hasMemory || IA->hasSideEffects())
7651 Chain = DAG.getRoot();
7653 // Second pass over the constraints: compute which constraint option to use
7654 // and assign registers to constraints that want a specific physreg.
7655 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7656 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7658 // If this is an output operand with a matching input operand, look up the
7659 // matching input. If their types mismatch, e.g. one is an integer, the
7660 // other is floating point, or their sizes are different, flag it as an
7662 if (OpInfo.hasMatchingInput()) {
7663 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
7664 patchMatchingInput(OpInfo, Input, DAG);
7667 // Compute the constraint code and ConstraintType to use.
7668 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
7670 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7671 OpInfo.Type == InlineAsm::isClobber)
7674 // If this is a memory input, and if the operand is not indirect, do what we
7675 // need to provide an address for the memory input.
7676 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7677 !OpInfo.isIndirect) {
7678 assert((OpInfo.isMultipleAlternative ||
7679 (OpInfo.Type == InlineAsm::isInput)) &&
7680 "Can only indirectify direct input operands!");
7682 // Memory operands really want the address of the value.
7683 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
7685 // There is no longer a Value* corresponding to this operand.
7686 OpInfo.CallOperandVal = nullptr;
7688 // It is now an indirect operand.
7689 OpInfo.isIndirect = true;
7692 // If this constraint is for a specific register, allocate it before
7694 SDISelAsmOperandInfo &RefOpInfo =
7695 OpInfo.isMatchingInputConstraint()
7696 ? ConstraintOperands[OpInfo.getMatchedOperand()]
7697 : ConstraintOperands[i];
7698 if (RefOpInfo.ConstraintType == TargetLowering::C_Register)
7699 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
7702 // Third pass - Loop over all of the operands, assigning virtual or physregs
7703 // to register class operands.
7704 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7705 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7706 SDISelAsmOperandInfo &RefOpInfo =
7707 OpInfo.isMatchingInputConstraint()
7708 ? ConstraintOperands[OpInfo.getMatchedOperand()]
7709 : ConstraintOperands[i];
7711 // C_Register operands have already been allocated, Other/Memory don't need
7713 if (RefOpInfo.ConstraintType == TargetLowering::C_RegisterClass)
7714 GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
7717 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
7718 std::vector<SDValue> AsmNodeOperands;
7719 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
7720 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
7721 IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
7723 // If we have a !srcloc metadata node associated with it, we want to attach
7724 // this to the ultimately generated inline asm machineinstr. To do this, we
7725 // pass in the third operand as this (potentially null) inline asm MDNode.
7726 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
7727 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
7729 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
7730 // bits as operand 3.
7731 AsmNodeOperands.push_back(DAG.getTargetConstant(
7732 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7734 // Loop over all of the inputs, copying the operand values into the
7735 // appropriate registers and processing the output regs.
7736 RegsForValue RetValRegs;
7738 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
7739 std::vector<std::pair<RegsForValue, Value *>> IndirectStoresToEmit;
7741 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7742 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7744 switch (OpInfo.Type) {
7745 case InlineAsm::isOutput:
7746 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
7747 OpInfo.ConstraintType != TargetLowering::C_Register) {
7748 // Memory output, or 'other' output (e.g. 'X' constraint).
7749 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
7751 unsigned ConstraintID =
7752 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7753 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7754 "Failed to convert memory constraint code to constraint id.");
7756 // Add information to the INLINEASM node to know about this output.
7757 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7758 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
7759 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
7761 AsmNodeOperands.push_back(OpInfo.CallOperand);
7765 // Otherwise, this is a register or register class output.
7767 // Copy the output from the appropriate register. Find a register that
7769 if (OpInfo.AssignedRegs.Regs.empty()) {
7771 CS, "couldn't allocate output register for constraint '" +
7772 Twine(OpInfo.ConstraintCode) + "'");
7776 // If this is an indirect operand, store through the pointer after the
7778 if (OpInfo.isIndirect) {
7779 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7780 OpInfo.CallOperandVal));
7782 // This is the result value of the call.
7783 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7784 // Concatenate this output onto the outputs list.
7785 RetValRegs.append(OpInfo.AssignedRegs);
7788 // Add information to the INLINEASM node to know that this register is
7791 .AddInlineAsmOperands(OpInfo.isEarlyClobber
7792 ? InlineAsm::Kind_RegDefEarlyClobber
7793 : InlineAsm::Kind_RegDef,
7794 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7797 case InlineAsm::isInput: {
7798 SDValue InOperandVal = OpInfo.CallOperand;
7800 if (OpInfo.isMatchingInputConstraint()) {
7801 // If this is required to match an output register we have already set,
7802 // just use its register.
7803 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7806 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7807 if (InlineAsm::isRegDefKind(OpFlag) ||
7808 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7809 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7810 if (OpInfo.isIndirect) {
7811 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7812 emitInlineAsmError(CS, "inline asm not supported yet:"
7813 " don't know how to handle tied "
7814 "indirect register inputs");
7818 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7819 SmallVector<unsigned, 4> Regs;
7821 if (!createVirtualRegs(Regs,
7822 InlineAsm::getNumOperandRegisters(OpFlag),
7824 emitInlineAsmError(CS, "inline asm error: This value type register "
7825 "class is not natively supported!");
7829 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7831 SDLoc dl = getCurSDLoc();
7832 // Use the produced MatchedRegs object to
7833 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
7834 CS.getInstruction());
7835 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7836 true, OpInfo.getMatchedOperand(), dl,
7837 DAG, AsmNodeOperands);
7841 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7842 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7843 "Unexpected number of operands");
7844 // Add information to the INLINEASM node to know about this input.
7845 // See InlineAsm.h isUseOperandTiedToDef.
7846 OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7847 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7848 OpInfo.getMatchedOperand());
7849 AsmNodeOperands.push_back(DAG.getTargetConstant(
7850 OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7851 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7855 // Treat indirect 'X' constraint as memory.
7856 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
7858 OpInfo.ConstraintType = TargetLowering::C_Memory;
7860 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
7861 std::vector<SDValue> Ops;
7862 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
7865 emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
7866 Twine(OpInfo.ConstraintCode) + "'");
7870 // Add information to the INLINEASM node to know about this input.
7871 unsigned ResOpType =
7872 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
7873 AsmNodeOperands.push_back(DAG.getTargetConstant(
7874 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7875 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7879 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
7880 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
7881 assert(InOperandVal.getValueType() ==
7882 TLI.getPointerTy(DAG.getDataLayout()) &&
7883 "Memory operands expect pointer values");
7885 unsigned ConstraintID =
7886 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7887 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7888 "Failed to convert memory constraint code to constraint id.");
7890 // Add information to the INLINEASM node to know about this input.
7891 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7892 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
7893 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
7896 AsmNodeOperands.push_back(InOperandVal);
7900 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
7901 OpInfo.ConstraintType == TargetLowering::C_Register) &&
7902 "Unknown constraint type!");
7904 // TODO: Support this.
7905 if (OpInfo.isIndirect) {
7907 CS, "Don't know how to handle indirect register inputs yet "
7908 "for constraint '" +
7909 Twine(OpInfo.ConstraintCode) + "'");
7913 // Copy the input into the appropriate registers.
7914 if (OpInfo.AssignedRegs.Regs.empty()) {
7915 emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
7916 Twine(OpInfo.ConstraintCode) + "'");
7920 SDLoc dl = getCurSDLoc();
7922 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7923 Chain, &Flag, CS.getInstruction());
7925 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
7926 dl, DAG, AsmNodeOperands);
7929 case InlineAsm::isClobber:
7930 // Add the clobbered value to the operand list, so that the register
7931 // allocator is aware that the physreg got clobbered.
7932 if (!OpInfo.AssignedRegs.Regs.empty())
7933 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
7934 false, 0, getCurSDLoc(), DAG,
7940 // Finish up input operands. Set the input chain and add the flag last.
7941 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
7942 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
7944 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
7945 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
7946 Flag = Chain.getValue(1);
7948 // If this asm returns a register value, copy the result from that register
7949 // and set it as the value of the call.
7950 if (!RetValRegs.Regs.empty()) {
7951 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7952 Chain, &Flag, CS.getInstruction());
7954 llvm::Type *CSResultType = CS.getType();
7956 ArrayRef<Type *> ResultTypes;
7957 SmallVector<SDValue, 1> ResultValues(1);
7958 if (CSResultType->isSingleValueType()) {
7960 ResultValues[0] = Val;
7961 ResultTypes = makeArrayRef(CSResultType);
7963 numRet = CSResultType->getNumContainedTypes();
7964 assert(Val->getNumOperands() == numRet &&
7965 "Mismatch in number of output operands in asm result");
7966 ResultTypes = CSResultType->subtypes();
7967 ArrayRef<SDUse> ValueUses = Val->ops();
7968 ResultValues.resize(numRet);
7969 std::transform(ValueUses.begin(), ValueUses.end(), ResultValues.begin(),
7970 [](const SDUse &u) -> SDValue { return u.get(); });
7972 SmallVector<EVT, 1> ResultVTs(numRet);
7973 for (unsigned i = 0; i < numRet; i++) {
7974 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), ResultTypes[i]);
7975 SDValue Val = ResultValues[i];
7976 assert(ResultTypes[i]->isSized() && "Unexpected unsized type");
7977 // If the type of the inline asm call site return value is different but
7978 // has same size as the type of the asm output bitcast it. One example
7979 // of this is for vectors with different width / number of elements.
7980 // This can happen for register classes that can contain multiple
7981 // different value types. The preg or vreg allocated may not have the
7982 // same VT as was expected.
7984 // This can also happen for a return value that disagrees with the
7985 // register class it is put in, eg. a double in a general-purpose
7986 // register on a 32-bit machine.
7987 if (ResultVT != Val.getValueType() &&
7988 ResultVT.getSizeInBits() == Val.getValueSizeInBits())
7989 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, Val);
7990 else if (ResultVT != Val.getValueType() && ResultVT.isInteger() &&
7991 Val.getValueType().isInteger()) {
7992 // If a result value was tied to an input value, the computed result
7993 // may have a wider width than the expected result. Extract the
7994 // relevant portion.
7995 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, Val);
7998 assert(ResultVT == Val.getValueType() && "Asm result value mismatch!");
7999 ResultVTs[i] = ResultVT;
8000 ResultValues[i] = Val;
8003 Val = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
8004 DAG.getVTList(ResultVTs), ResultValues);
8005 setValue(CS.getInstruction(), Val);
8006 // Don't need to use this as a chain in this case.
8007 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
8011 std::vector<std::pair<SDValue, const Value *>> StoresToEmit;
8013 // Process indirect outputs, first output all of the flagged copies out of
8015 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
8016 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
8017 const Value *Ptr = IndirectStoresToEmit[i].second;
8018 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
8020 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
8023 // Emit the non-flagged stores from the physregs.
8024 SmallVector<SDValue, 8> OutChains;
8025 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
8026 SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
8027 getValue(StoresToEmit[i].second),
8028 MachinePointerInfo(StoresToEmit[i].second));
8029 OutChains.push_back(Val);
8032 if (!OutChains.empty())
8033 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
8038 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
8039 const Twine &Message) {
8040 LLVMContext &Ctx = *DAG.getContext();
8041 Ctx.emitError(CS.getInstruction(), Message);
8043 // Make sure we leave the DAG in a valid state
8044 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8045 SmallVector<EVT, 1> ValueVTs;
8046 ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8048 if (ValueVTs.empty())
8051 SmallVector<SDValue, 1> Ops;
8052 for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
8053 Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
8055 setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
8058 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
8059 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
8060 MVT::Other, getRoot(),
8061 getValue(I.getArgOperand(0)),
8062 DAG.getSrcValue(I.getArgOperand(0))));
8065 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
8066 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8067 const DataLayout &DL = DAG.getDataLayout();
8068 SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
8069 getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
8070 DAG.getSrcValue(I.getOperand(0)),
8071 DL.getABITypeAlignment(I.getType()));
8073 DAG.setRoot(V.getValue(1));
8076 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
8077 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
8078 MVT::Other, getRoot(),
8079 getValue(I.getArgOperand(0)),
8080 DAG.getSrcValue(I.getArgOperand(0))));
8083 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
8084 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
8085 MVT::Other, getRoot(),
8086 getValue(I.getArgOperand(0)),
8087 getValue(I.getArgOperand(1)),
8088 DAG.getSrcValue(I.getArgOperand(0)),
8089 DAG.getSrcValue(I.getArgOperand(1))));
8092 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
8093 const Instruction &I,
8095 const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
8099 ConstantRange CR = getConstantRangeFromMetadata(*Range);
8100 if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
8103 APInt Lo = CR.getUnsignedMin();
8104 if (!Lo.isMinValue())
8107 APInt Hi = CR.getUnsignedMax();
8108 unsigned Bits = std::max(Hi.getActiveBits(),
8109 static_cast<unsigned>(IntegerType::MIN_INT_BITS));
8111 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8113 SDLoc SL = getCurSDLoc();
8115 SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
8116 DAG.getValueType(SmallVT));
8117 unsigned NumVals = Op.getNode()->getNumValues();
8121 SmallVector<SDValue, 4> Ops;
8123 Ops.push_back(ZExt);
8124 for (unsigned I = 1; I != NumVals; ++I)
8125 Ops.push_back(Op.getValue(I));
8127 return DAG.getMergeValues(Ops, SL);
8130 /// Populate a CallLowerinInfo (into \p CLI) based on the properties of
8131 /// the call being lowered.
8133 /// This is a helper for lowering intrinsics that follow a target calling
8134 /// convention or require stack pointer adjustment. Only a subset of the
8135 /// intrinsic's operands need to participate in the calling convention.
8136 void SelectionDAGBuilder::populateCallLoweringInfo(
8137 TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
8138 unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
8139 bool IsPatchPoint) {
8140 TargetLowering::ArgListTy Args;
8141 Args.reserve(NumArgs);
8143 // Populate the argument list.
8144 // Attributes for args start at offset 1, after the return attribute.
8145 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8146 ArgI != ArgE; ++ArgI) {
8147 const Value *V = CS->getOperand(ArgI);
8149 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
8151 TargetLowering::ArgListEntry Entry;
8152 Entry.Node = getValue(V);
8153 Entry.Ty = V->getType();
8154 Entry.setAttributes(&CS, ArgI);
8155 Args.push_back(Entry);
8158 CLI.setDebugLoc(getCurSDLoc())
8159 .setChain(getRoot())
8160 .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
8161 .setDiscardResult(CS->use_empty())
8162 .setIsPatchPoint(IsPatchPoint);
8165 /// Add a stack map intrinsic call's live variable operands to a stackmap
8166 /// or patchpoint target node's operand list.
8168 /// Constants are converted to TargetConstants purely as an optimization to
8169 /// avoid constant materialization and register allocation.
8171 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8172 /// generate addess computation nodes, and so ExpandISelPseudo can convert the
8173 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8174 /// address materialization and register allocation, but may also be required
8175 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8176 /// alloca in the entry block, then the runtime may assume that the alloca's
8177 /// StackMap location can be read immediately after compilation and that the
8178 /// location is valid at any point during execution (this is similar to the
8179 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8180 /// only available in a register, then the runtime would need to trap when
8181 /// execution reaches the StackMap in order to read the alloca's location.
8182 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
8183 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8184 SelectionDAGBuilder &Builder) {
8185 for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
8186 SDValue OpVal = Builder.getValue(CS.getArgument(i));
8187 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8189 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8191 Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8192 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8193 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8194 Ops.push_back(Builder.DAG.getTargetFrameIndex(
8195 FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8197 Ops.push_back(OpVal);
8201 /// Lower llvm.experimental.stackmap directly to its target opcode.
8202 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8203 // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
8204 // [live variables...])
8206 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8208 SDValue Chain, InFlag, Callee, NullPtr;
8209 SmallVector<SDValue, 32> Ops;
8211 SDLoc DL = getCurSDLoc();
8212 Callee = getValue(CI.getCalledValue());
8213 NullPtr = DAG.getIntPtrConstant(0, DL, true);
8215 // The stackmap intrinsic only records the live variables (the arguemnts
8216 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
8217 // intrinsic, this won't be lowered to a function call. This means we don't
8218 // have to worry about calling conventions and target specific lowering code.
8219 // Instead we perform the call lowering right here.
8221 // chain, flag = CALLSEQ_START(chain, 0, 0)
8222 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8223 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8225 Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8226 InFlag = Chain.getValue(1);
8228 // Add the <id> and <numBytes> constants.
8229 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8230 Ops.push_back(DAG.getTargetConstant(
8231 cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8232 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8233 Ops.push_back(DAG.getTargetConstant(
8234 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8237 // Push live variables for the stack map.
8238 addStackMapLiveVars(&CI, 2, DL, Ops, *this);
8240 // We are not pushing any register mask info here on the operands list,
8241 // because the stackmap doesn't clobber anything.
8243 // Push the chain and the glue flag.
8244 Ops.push_back(Chain);
8245 Ops.push_back(InFlag);
8247 // Create the STACKMAP node.
8248 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8249 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8250 Chain = SDValue(SM, 0);
8251 InFlag = Chain.getValue(1);
8253 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8255 // Stackmaps don't generate values, so nothing goes into the NodeMap.
8257 // Set the root to the target-lowered call chain.
8260 // Inform the Frame Information that we have a stackmap in this function.
8261 FuncInfo.MF->getFrameInfo().setHasStackMap();
8264 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8265 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
8266 const BasicBlock *EHPadBB) {
8267 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8272 // [live variables...])
8274 CallingConv::ID CC = CS.getCallingConv();
8275 bool IsAnyRegCC = CC == CallingConv::AnyReg;
8276 bool HasDef = !CS->getType()->isVoidTy();
8277 SDLoc dl = getCurSDLoc();
8278 SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
8280 // Handle immediate and symbolic callees.
8281 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8282 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8284 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8285 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8286 SDLoc(SymbolicCallee),
8287 SymbolicCallee->getValueType(0));
8289 // Get the real number of arguments participating in the call <numArgs>
8290 SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
8291 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8293 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8294 // Intrinsics include all meta-operands up to but not including CC.
8295 unsigned NumMetaOpers = PatchPointOpers::CCPos;
8296 assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
8297 "Not enough arguments provided to the patchpoint intrinsic");
8299 // For AnyRegCC the arguments are lowered later on manually.
8300 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8302 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
8304 TargetLowering::CallLoweringInfo CLI(DAG);
8305 populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
8307 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8309 SDNode *CallEnd = Result.second.getNode();
8310 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8311 CallEnd = CallEnd->getOperand(0).getNode();
8313 /// Get a call instruction from the call sequence chain.
8314 /// Tail calls are not allowed.
8315 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8316 "Expected a callseq node.");
8317 SDNode *Call = CallEnd->getOperand(0).getNode();
8318 bool HasGlue = Call->getGluedNode();
8320 // Replace the target specific call node with the patchable intrinsic.
8321 SmallVector<SDValue, 8> Ops;
8323 // Add the <id> and <numBytes> constants.
8324 SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
8325 Ops.push_back(DAG.getTargetConstant(
8326 cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8327 SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
8328 Ops.push_back(DAG.getTargetConstant(
8329 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8333 Ops.push_back(Callee);
8335 // Adjust <numArgs> to account for any arguments that have been passed on the
8337 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
8338 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8339 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8340 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8342 // Add the calling convention
8343 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8345 // Add the arguments we omitted previously. The register allocator should
8346 // place these in any free register.
8348 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8349 Ops.push_back(getValue(CS.getArgument(i)));
8351 // Push the arguments from the call instruction up to the register mask.
8352 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8353 Ops.append(Call->op_begin() + 2, e);
8355 // Push live variables for the stack map.
8356 addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
8358 // Push the register mask info.
8360 Ops.push_back(*(Call->op_end()-2));
8362 Ops.push_back(*(Call->op_end()-1));
8364 // Push the chain (this is originally the first operand of the call, but
8365 // becomes now the last or second to last operand).
8366 Ops.push_back(*(Call->op_begin()));
8368 // Push the glue flag (last operand).
8370 Ops.push_back(*(Call->op_end()-1));
8373 if (IsAnyRegCC && HasDef) {
8374 // Create the return types based on the intrinsic definition
8375 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8376 SmallVector<EVT, 3> ValueVTs;
8377 ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8378 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8380 // There is always a chain and a glue type at the end
8381 ValueVTs.push_back(MVT::Other);
8382 ValueVTs.push_back(MVT::Glue);
8383 NodeTys = DAG.getVTList(ValueVTs);
8385 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8387 // Replace the target specific call node with a PATCHPOINT node.
8388 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8391 // Update the NodeMap.
8394 setValue(CS.getInstruction(), SDValue(MN, 0));
8396 setValue(CS.getInstruction(), Result.first);
8399 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
8400 // call sequence. Furthermore the location of the chain and glue can change
8401 // when the AnyReg calling convention is used and the intrinsic returns a
8403 if (IsAnyRegCC && HasDef) {
8404 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8405 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8406 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8408 DAG.ReplaceAllUsesWith(Call, MN);
8409 DAG.DeleteNode(Call);
8411 // Inform the Frame Information that we have a patchpoint in this function.
8412 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8415 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8416 unsigned Intrinsic) {
8417 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8418 SDValue Op1 = getValue(I.getArgOperand(0));
8420 if (I.getNumArgOperands() > 1)
8421 Op2 = getValue(I.getArgOperand(1));
8422 SDLoc dl = getCurSDLoc();
8423 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8426 if (isa<FPMathOperator>(I))
8427 FMF = I.getFastMathFlags();
8429 switch (Intrinsic) {
8430 case Intrinsic::experimental_vector_reduce_fadd:
8432 Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2);
8434 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8436 case Intrinsic::experimental_vector_reduce_fmul:
8438 Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2);
8440 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8442 case Intrinsic::experimental_vector_reduce_add:
8443 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8445 case Intrinsic::experimental_vector_reduce_mul:
8446 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8448 case Intrinsic::experimental_vector_reduce_and:
8449 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8451 case Intrinsic::experimental_vector_reduce_or:
8452 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8454 case Intrinsic::experimental_vector_reduce_xor:
8455 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8457 case Intrinsic::experimental_vector_reduce_smax:
8458 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8460 case Intrinsic::experimental_vector_reduce_smin:
8461 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
8463 case Intrinsic::experimental_vector_reduce_umax:
8464 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
8466 case Intrinsic::experimental_vector_reduce_umin:
8467 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
8469 case Intrinsic::experimental_vector_reduce_fmax:
8470 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
8472 case Intrinsic::experimental_vector_reduce_fmin:
8473 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
8476 llvm_unreachable("Unhandled vector reduce intrinsic");
8481 /// Returns an AttributeList representing the attributes applied to the return
8482 /// value of the given call.
8483 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
8484 SmallVector<Attribute::AttrKind, 2> Attrs;
8486 Attrs.push_back(Attribute::SExt);
8488 Attrs.push_back(Attribute::ZExt);
8490 Attrs.push_back(Attribute::InReg);
8492 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
8496 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
8497 /// implementation, which just calls LowerCall.
8498 /// FIXME: When all targets are
8499 /// migrated to using LowerCall, this hook should be integrated into SDISel.
8500 std::pair<SDValue, SDValue>
8501 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
8502 // Handle the incoming return values from the call.
8504 Type *OrigRetTy = CLI.RetTy;
8505 SmallVector<EVT, 4> RetTys;
8506 SmallVector<uint64_t, 4> Offsets;
8507 auto &DL = CLI.DAG.getDataLayout();
8508 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
8510 if (CLI.IsPostTypeLegalization) {
8511 // If we are lowering a libcall after legalization, split the return type.
8512 SmallVector<EVT, 4> OldRetTys = std::move(RetTys);
8513 SmallVector<uint64_t, 4> OldOffsets = std::move(Offsets);
8514 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
8515 EVT RetVT = OldRetTys[i];
8516 uint64_t Offset = OldOffsets[i];
8517 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
8518 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
8519 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
8520 RetTys.append(NumRegs, RegisterVT);
8521 for (unsigned j = 0; j != NumRegs; ++j)
8522 Offsets.push_back(Offset + j * RegisterVTByteSZ);
8526 SmallVector<ISD::OutputArg, 4> Outs;
8527 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
8529 bool CanLowerReturn =
8530 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
8531 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
8533 SDValue DemoteStackSlot;
8534 int DemoteStackIdx = -100;
8535 if (!CanLowerReturn) {
8536 // FIXME: equivalent assert?
8537 // assert(!CS.hasInAllocaArgument() &&
8538 // "sret demotion is incompatible with inalloca");
8539 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
8540 unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
8541 MachineFunction &MF = CLI.DAG.getMachineFunction();
8542 DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
8543 Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
8544 DL.getAllocaAddrSpace());
8546 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
8548 Entry.Node = DemoteStackSlot;
8549 Entry.Ty = StackSlotPtrType;
8550 Entry.IsSExt = false;
8551 Entry.IsZExt = false;
8552 Entry.IsInReg = false;
8553 Entry.IsSRet = true;
8554 Entry.IsNest = false;
8555 Entry.IsByVal = false;
8556 Entry.IsReturned = false;
8557 Entry.IsSwiftSelf = false;
8558 Entry.IsSwiftError = false;
8559 Entry.Alignment = Align;
8560 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
8561 CLI.NumFixedArgs += 1;
8562 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
8564 // sret demotion isn't compatible with tail-calls, since the sret argument
8565 // points into the callers stack frame.
8566 CLI.IsTailCall = false;
8568 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8570 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8572 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8574 for (unsigned i = 0; i != NumRegs; ++i) {
8575 ISD::InputArg MyFlags;
8576 MyFlags.VT = RegisterVT;
8578 MyFlags.Used = CLI.IsReturnValueUsed;
8580 MyFlags.Flags.setSExt();
8582 MyFlags.Flags.setZExt();
8584 MyFlags.Flags.setInReg();
8585 CLI.Ins.push_back(MyFlags);
8590 // We push in swifterror return as the last element of CLI.Ins.
8591 ArgListTy &Args = CLI.getArgs();
8592 if (supportSwiftError()) {
8593 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8594 if (Args[i].IsSwiftError) {
8595 ISD::InputArg MyFlags;
8596 MyFlags.VT = getPointerTy(DL);
8597 MyFlags.ArgVT = EVT(getPointerTy(DL));
8598 MyFlags.Flags.setSwiftError();
8599 CLI.Ins.push_back(MyFlags);
8604 // Handle all of the outgoing arguments.
8606 CLI.OutVals.clear();
8607 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8608 SmallVector<EVT, 4> ValueVTs;
8609 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
8610 // FIXME: Split arguments if CLI.IsPostTypeLegalization
8611 Type *FinalType = Args[i].Ty;
8612 if (Args[i].IsByVal)
8613 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
8614 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
8615 FinalType, CLI.CallConv, CLI.IsVarArg);
8616 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
8618 EVT VT = ValueVTs[Value];
8619 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
8620 SDValue Op = SDValue(Args[i].Node.getNode(),
8621 Args[i].Node.getResNo() + Value);
8622 ISD::ArgFlagsTy Flags;
8624 // Certain targets (such as MIPS), may have a different ABI alignment
8625 // for a type depending on the context. Give the target a chance to
8626 // specify the alignment it wants.
8627 unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
8633 if (Args[i].IsInReg) {
8634 // If we are using vectorcall calling convention, a structure that is
8635 // passed InReg - is surely an HVA
8636 if (CLI.CallConv == CallingConv::X86_VectorCall &&
8637 isa<StructType>(FinalType)) {
8638 // The first value of a structure is marked
8640 Flags.setHvaStart();
8648 if (Args[i].IsSwiftSelf)
8649 Flags.setSwiftSelf();
8650 if (Args[i].IsSwiftError)
8651 Flags.setSwiftError();
8652 if (Args[i].IsByVal)
8654 if (Args[i].IsInAlloca) {
8655 Flags.setInAlloca();
8656 // Set the byval flag for CCAssignFn callbacks that don't know about
8657 // inalloca. This way we can know how many bytes we should've allocated
8658 // and how many bytes a callee cleanup function will pop. If we port
8659 // inalloca to more targets, we'll have to add custom inalloca handling
8660 // in the various CC lowering callbacks.
8663 if (Args[i].IsByVal || Args[i].IsInAlloca) {
8664 PointerType *Ty = cast<PointerType>(Args[i].Ty);
8665 Type *ElementTy = Ty->getElementType();
8666 Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8667 // For ByVal, alignment should come from FE. BE will guess if this
8668 // info is not there but there are cases it cannot get right.
8669 unsigned FrameAlign;
8670 if (Args[i].Alignment)
8671 FrameAlign = Args[i].Alignment;
8673 FrameAlign = getByValTypeAlignment(ElementTy, DL);
8674 Flags.setByValAlign(FrameAlign);
8679 Flags.setInConsecutiveRegs();
8680 Flags.setOrigAlign(OriginalAlignment);
8682 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8684 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8686 SmallVector<SDValue, 4> Parts(NumParts);
8687 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
8690 ExtendKind = ISD::SIGN_EXTEND;
8691 else if (Args[i].IsZExt)
8692 ExtendKind = ISD::ZERO_EXTEND;
8694 // Conservatively only handle 'returned' on non-vectors that can be lowered,
8696 if (Args[i].IsReturned && !Op.getValueType().isVector() &&
8698 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
8699 "unexpected use of 'returned'");
8700 // Before passing 'returned' to the target lowering code, ensure that
8701 // either the register MVT and the actual EVT are the same size or that
8702 // the return value and argument are extended in the same way; in these
8703 // cases it's safe to pass the argument register value unchanged as the
8704 // return register value (although it's at the target's option whether
8706 // TODO: allow code generation to take advantage of partially preserved
8707 // registers rather than clobbering the entire register when the
8708 // parameter extension method is not compatible with the return
8710 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
8711 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
8712 CLI.RetZExt == Args[i].IsZExt))
8713 Flags.setReturned();
8716 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
8717 CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
8719 for (unsigned j = 0; j != NumParts; ++j) {
8720 // if it isn't first piece, alignment must be 1
8721 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
8722 i < CLI.NumFixedArgs,
8723 i, j*Parts[j].getValueType().getStoreSize());
8724 if (NumParts > 1 && j == 0)
8725 MyFlags.Flags.setSplit();
8727 MyFlags.Flags.setOrigAlign(1);
8728 if (j == NumParts - 1)
8729 MyFlags.Flags.setSplitEnd();
8732 CLI.Outs.push_back(MyFlags);
8733 CLI.OutVals.push_back(Parts[j]);
8736 if (NeedsRegBlock && Value == NumValues - 1)
8737 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
8741 SmallVector<SDValue, 4> InVals;
8742 CLI.Chain = LowerCall(CLI, InVals);
8744 // Update CLI.InVals to use outside of this function.
8745 CLI.InVals = InVals;
8747 // Verify that the target's LowerCall behaved as expected.
8748 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
8749 "LowerCall didn't return a valid chain!");
8750 assert((!CLI.IsTailCall || InVals.empty()) &&
8751 "LowerCall emitted a return value for a tail call!");
8752 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
8753 "LowerCall didn't emit the correct number of values!");
8755 // For a tail call, the return value is merely live-out and there aren't
8756 // any nodes in the DAG representing it. Return a special value to
8757 // indicate that a tail call has been emitted and no more Instructions
8758 // should be processed in the current block.
8759 if (CLI.IsTailCall) {
8760 CLI.DAG.setRoot(CLI.Chain);
8761 return std::make_pair(SDValue(), SDValue());
8765 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
8766 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
8767 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
8768 "LowerCall emitted a value with the wrong type!");
8772 SmallVector<SDValue, 4> ReturnValues;
8773 if (!CanLowerReturn) {
8774 // The instruction result is the result of loading from the
8775 // hidden sret parameter.
8776 SmallVector<EVT, 1> PVTs;
8777 Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
8779 ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
8780 assert(PVTs.size() == 1 && "Pointers should fit in one register");
8781 EVT PtrVT = PVTs[0];
8783 unsigned NumValues = RetTys.size();
8784 ReturnValues.resize(NumValues);
8785 SmallVector<SDValue, 4> Chains(NumValues);
8787 // An aggregate return value cannot wrap around the address space, so
8788 // offsets to its parts don't wrap either.
8790 Flags.setNoUnsignedWrap(true);
8792 for (unsigned i = 0; i < NumValues; ++i) {
8793 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
8794 CLI.DAG.getConstant(Offsets[i], CLI.DL,
8796 SDValue L = CLI.DAG.getLoad(
8797 RetTys[i], CLI.DL, CLI.Chain, Add,
8798 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
8799 DemoteStackIdx, Offsets[i]),
8800 /* Alignment = */ 1);
8801 ReturnValues[i] = L;
8802 Chains[i] = L.getValue(1);
8805 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
8807 // Collect the legal value parts into potentially illegal values
8808 // that correspond to the original function's return values.
8809 Optional<ISD::NodeType> AssertOp;
8811 AssertOp = ISD::AssertSext;
8812 else if (CLI.RetZExt)
8813 AssertOp = ISD::AssertZext;
8814 unsigned CurReg = 0;
8815 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8817 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8819 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8822 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
8823 NumRegs, RegisterVT, VT, nullptr,
8824 CLI.CallConv, AssertOp));
8828 // For a function returning void, there is no return value. We can't create
8829 // such a node, so we just return a null return value in that case. In
8830 // that case, nothing will actually look at the value.
8831 if (ReturnValues.empty())
8832 return std::make_pair(SDValue(), CLI.Chain);
8835 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
8836 CLI.DAG.getVTList(RetTys), ReturnValues);
8837 return std::make_pair(Res, CLI.Chain);
8840 void TargetLowering::LowerOperationWrapper(SDNode *N,
8841 SmallVectorImpl<SDValue> &Results,
8842 SelectionDAG &DAG) const {
8843 if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
8844 Results.push_back(Res);
8847 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
8848 llvm_unreachable("LowerOperation not implemented for this target!");
8852 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
8853 SDValue Op = getNonRegisterValue(V);
8854 assert((Op.getOpcode() != ISD::CopyFromReg ||
8855 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
8856 "Copy from a reg to the same reg!");
8857 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
8859 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8860 // If this is an InlineAsm we have to match the registers required, not the
8861 // notional registers required by the type.
8863 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
8864 None); // This is not an ABI copy.
8865 SDValue Chain = DAG.getEntryNode();
8867 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
8868 FuncInfo.PreferredExtendType.end())
8870 : FuncInfo.PreferredExtendType[V];
8871 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
8872 PendingExports.push_back(Chain);
8875 #include "llvm/CodeGen/SelectionDAGISel.h"
8877 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
8878 /// entry block, return true. This includes arguments used by switches, since
8879 /// the switch may expand into multiple basic blocks.
8880 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
8881 // With FastISel active, we may be splitting blocks, so force creation
8882 // of virtual registers for all non-dead arguments.
8884 return A->use_empty();
8886 const BasicBlock &Entry = A->getParent()->front();
8887 for (const User *U : A->users())
8888 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
8889 return false; // Use not in entry block.
8894 using ArgCopyElisionMapTy =
8895 DenseMap<const Argument *,
8896 std::pair<const AllocaInst *, const StoreInst *>>;
8898 /// Scan the entry block of the function in FuncInfo for arguments that look
8899 /// like copies into a local alloca. Record any copied arguments in
8900 /// ArgCopyElisionCandidates.
8902 findArgumentCopyElisionCandidates(const DataLayout &DL,
8903 FunctionLoweringInfo *FuncInfo,
8904 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
8905 // Record the state of every static alloca used in the entry block. Argument
8906 // allocas are all used in the entry block, so we need approximately as many
8907 // entries as we have arguments.
8908 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
8909 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
8910 unsigned NumArgs = FuncInfo->Fn->arg_size();
8911 StaticAllocas.reserve(NumArgs * 2);
8913 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
8916 V = V->stripPointerCasts();
8917 const auto *AI = dyn_cast<AllocaInst>(V);
8918 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
8920 auto Iter = StaticAllocas.insert({AI, Unknown});
8921 return &Iter.first->second;
8924 // Look for stores of arguments to static allocas. Look through bitcasts and
8925 // GEPs to handle type coercions, as long as the alloca is fully initialized
8926 // by the store. Any non-store use of an alloca escapes it and any subsequent
8927 // unanalyzed store might write it.
8928 // FIXME: Handle structs initialized with multiple stores.
8929 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
8930 // Look for stores, and handle non-store uses conservatively.
8931 const auto *SI = dyn_cast<StoreInst>(&I);
8933 // We will look through cast uses, so ignore them completely.
8936 // Ignore debug info intrinsics, they don't escape or store to allocas.
8937 if (isa<DbgInfoIntrinsic>(I))
8939 // This is an unknown instruction. Assume it escapes or writes to all
8940 // static alloca operands.
8941 for (const Use &U : I.operands()) {
8942 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
8943 *Info = StaticAllocaInfo::Clobbered;
8948 // If the stored value is a static alloca, mark it as escaped.
8949 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
8950 *Info = StaticAllocaInfo::Clobbered;
8952 // Check if the destination is a static alloca.
8953 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
8954 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
8957 const AllocaInst *AI = cast<AllocaInst>(Dst);
8959 // Skip allocas that have been initialized or clobbered.
8960 if (*Info != StaticAllocaInfo::Unknown)
8963 // Check if the stored value is an argument, and that this store fully
8964 // initializes the alloca. Don't elide copies from the same argument twice.
8965 const Value *Val = SI->getValueOperand()->stripPointerCasts();
8966 const auto *Arg = dyn_cast<Argument>(Val);
8967 if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
8968 Arg->getType()->isEmptyTy() ||
8969 DL.getTypeStoreSize(Arg->getType()) !=
8970 DL.getTypeAllocSize(AI->getAllocatedType()) ||
8971 ArgCopyElisionCandidates.count(Arg)) {
8972 *Info = StaticAllocaInfo::Clobbered;
8976 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
8979 // Mark this alloca and store for argument copy elision.
8980 *Info = StaticAllocaInfo::Elidable;
8981 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
8983 // Stop scanning if we've seen all arguments. This will happen early in -O0
8984 // builds, which is useful, because -O0 builds have large entry blocks and
8986 if (ArgCopyElisionCandidates.size() == NumArgs)
8991 /// Try to elide argument copies from memory into a local alloca. Succeeds if
8992 /// ArgVal is a load from a suitable fixed stack object.
8993 static void tryToElideArgumentCopy(
8994 FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8995 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8996 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8997 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8998 SDValue ArgVal, bool &ArgHasUses) {
8999 // Check if this is a load from a fixed stack object.
9000 auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
9003 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
9007 // Check that the fixed stack object is the right size and alignment.
9008 // Look at the alignment that the user wrote on the alloca instead of looking
9009 // at the stack object.
9010 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
9011 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
9012 const AllocaInst *AI = ArgCopyIter->second.first;
9013 int FixedIndex = FINode->getIndex();
9014 int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
9015 int OldIndex = AllocaIndex;
9016 MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
9017 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
9019 dbgs() << " argument copy elision failed due to bad fixed stack "
9023 unsigned RequiredAlignment = AI->getAlignment();
9024 if (!RequiredAlignment) {
9025 RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
9026 AI->getAllocatedType());
9028 if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
9029 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
9030 "greater than stack argument alignment ("
9031 << RequiredAlignment << " vs "
9032 << MFI.getObjectAlignment(FixedIndex) << ")\n");
9036 // Perform the elision. Delete the old stack object and replace its only use
9037 // in the variable info map. Mark the stack object as mutable.
9039 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
9040 << " Replacing frame index " << OldIndex << " with " << FixedIndex
9043 MFI.RemoveStackObject(OldIndex);
9044 MFI.setIsImmutableObjectIndex(FixedIndex, false);
9045 AllocaIndex = FixedIndex;
9046 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
9047 Chains.push_back(ArgVal.getValue(1));
9049 // Avoid emitting code for the store implementing the copy.
9050 const StoreInst *SI = ArgCopyIter->second.second;
9051 ElidedArgCopyInstrs.insert(SI);
9053 // Check for uses of the argument again so that we can avoid exporting ArgVal
9054 // if it is't used by anything other than the store.
9055 for (const Value *U : Arg.users()) {
9063 void SelectionDAGISel::LowerArguments(const Function &F) {
9064 SelectionDAG &DAG = SDB->DAG;
9065 SDLoc dl = SDB->getCurSDLoc();
9066 const DataLayout &DL = DAG.getDataLayout();
9067 SmallVector<ISD::InputArg, 16> Ins;
9069 if (!FuncInfo->CanLowerReturn) {
9070 // Put in an sret pointer parameter before all the other parameters.
9071 SmallVector<EVT, 1> ValueVTs;
9072 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9073 F.getReturnType()->getPointerTo(
9074 DAG.getDataLayout().getAllocaAddrSpace()),
9077 // NOTE: Assuming that a pointer will never break down to more than one VT
9079 ISD::ArgFlagsTy Flags;
9081 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9082 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9083 ISD::InputArg::NoArgIndex, 0);
9084 Ins.push_back(RetArg);
9087 // Look for stores of arguments to static allocas. Mark such arguments with a
9088 // flag to ask the target to give us the memory location of that argument if
9090 ArgCopyElisionMapTy ArgCopyElisionCandidates;
9091 findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
9093 // Set up the incoming argument description vector.
9094 for (const Argument &Arg : F.args()) {
9095 unsigned ArgNo = Arg.getArgNo();
9096 SmallVector<EVT, 4> ValueVTs;
9097 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9098 bool isArgValueUsed = !Arg.use_empty();
9099 unsigned PartBase = 0;
9100 Type *FinalType = Arg.getType();
9101 if (Arg.hasAttribute(Attribute::ByVal))
9102 FinalType = cast<PointerType>(FinalType)->getElementType();
9103 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9104 FinalType, F.getCallingConv(), F.isVarArg());
9105 for (unsigned Value = 0, NumValues = ValueVTs.size();
9106 Value != NumValues; ++Value) {
9107 EVT VT = ValueVTs[Value];
9108 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9109 ISD::ArgFlagsTy Flags;
9111 // Certain targets (such as MIPS), may have a different ABI alignment
9112 // for a type depending on the context. Give the target a chance to
9113 // specify the alignment it wants.
9114 unsigned OriginalAlignment =
9115 TLI->getABIAlignmentForCallingConv(ArgTy, DL);
9117 if (Arg.hasAttribute(Attribute::ZExt))
9119 if (Arg.hasAttribute(Attribute::SExt))
9121 if (Arg.hasAttribute(Attribute::InReg)) {
9122 // If we are using vectorcall calling convention, a structure that is
9123 // passed InReg - is surely an HVA
9124 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
9125 isa<StructType>(Arg.getType())) {
9126 // The first value of a structure is marked
9128 Flags.setHvaStart();
9134 if (Arg.hasAttribute(Attribute::StructRet))
9136 if (Arg.hasAttribute(Attribute::SwiftSelf))
9137 Flags.setSwiftSelf();
9138 if (Arg.hasAttribute(Attribute::SwiftError))
9139 Flags.setSwiftError();
9140 if (Arg.hasAttribute(Attribute::ByVal))
9142 if (Arg.hasAttribute(Attribute::InAlloca)) {
9143 Flags.setInAlloca();
9144 // Set the byval flag for CCAssignFn callbacks that don't know about
9145 // inalloca. This way we can know how many bytes we should've allocated
9146 // and how many bytes a callee cleanup function will pop. If we port
9147 // inalloca to more targets, we'll have to add custom inalloca handling
9148 // in the various CC lowering callbacks.
9151 if (F.getCallingConv() == CallingConv::X86_INTR) {
9152 // IA Interrupt passes frame (1st parameter) by value in the stack.
9156 if (Flags.isByVal() || Flags.isInAlloca()) {
9157 PointerType *Ty = cast<PointerType>(Arg.getType());
9158 Type *ElementTy = Ty->getElementType();
9159 Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
9160 // For ByVal, alignment should be passed from FE. BE will guess if
9161 // this info is not there but there are cases it cannot get right.
9162 unsigned FrameAlign;
9163 if (Arg.getParamAlignment())
9164 FrameAlign = Arg.getParamAlignment();
9166 FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9167 Flags.setByValAlign(FrameAlign);
9169 if (Arg.hasAttribute(Attribute::Nest))
9172 Flags.setInConsecutiveRegs();
9173 Flags.setOrigAlign(OriginalAlignment);
9174 if (ArgCopyElisionCandidates.count(&Arg))
9175 Flags.setCopyElisionCandidate();
9177 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9178 *CurDAG->getContext(), F.getCallingConv(), VT);
9179 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9180 *CurDAG->getContext(), F.getCallingConv(), VT);
9181 for (unsigned i = 0; i != NumRegs; ++i) {
9182 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9183 ArgNo, PartBase+i*RegisterVT.getStoreSize());
9184 if (NumRegs > 1 && i == 0)
9185 MyFlags.Flags.setSplit();
9186 // if it isn't first piece, alignment must be 1
9188 MyFlags.Flags.setOrigAlign(1);
9189 if (i == NumRegs - 1)
9190 MyFlags.Flags.setSplitEnd();
9192 Ins.push_back(MyFlags);
9194 if (NeedsRegBlock && Value == NumValues - 1)
9195 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9196 PartBase += VT.getStoreSize();
9200 // Call the target to set up the argument values.
9201 SmallVector<SDValue, 8> InVals;
9202 SDValue NewRoot = TLI->LowerFormalArguments(
9203 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9205 // Verify that the target's LowerFormalArguments behaved as expected.
9206 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9207 "LowerFormalArguments didn't return a valid chain!");
9208 assert(InVals.size() == Ins.size() &&
9209 "LowerFormalArguments didn't emit the correct number of values!");
9211 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9212 assert(InVals[i].getNode() &&
9213 "LowerFormalArguments emitted a null value!");
9214 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9215 "LowerFormalArguments emitted a value with the wrong type!");
9219 // Update the DAG with the new chain value resulting from argument lowering.
9220 DAG.setRoot(NewRoot);
9222 // Set up the argument values.
9224 if (!FuncInfo->CanLowerReturn) {
9225 // Create a virtual register for the sret pointer, and put in a copy
9226 // from the sret argument into it.
9227 SmallVector<EVT, 1> ValueVTs;
9228 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9229 F.getReturnType()->getPointerTo(
9230 DAG.getDataLayout().getAllocaAddrSpace()),
9232 MVT VT = ValueVTs[0].getSimpleVT();
9233 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9234 Optional<ISD::NodeType> AssertOp = None;
9235 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9236 nullptr, F.getCallingConv(), AssertOp);
9238 MachineFunction& MF = SDB->DAG.getMachineFunction();
9239 MachineRegisterInfo& RegInfo = MF.getRegInfo();
9240 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9241 FuncInfo->DemoteRegister = SRetReg;
9243 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9244 DAG.setRoot(NewRoot);
9246 // i indexes lowered arguments. Bump it past the hidden sret argument.
9250 SmallVector<SDValue, 4> Chains;
9251 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9252 for (const Argument &Arg : F.args()) {
9253 SmallVector<SDValue, 4> ArgValues;
9254 SmallVector<EVT, 4> ValueVTs;
9255 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9256 unsigned NumValues = ValueVTs.size();
9260 bool ArgHasUses = !Arg.use_empty();
9262 // Elide the copying store if the target loaded this argument from a
9263 // suitable fixed stack object.
9264 if (Ins[i].Flags.isCopyElisionCandidate()) {
9265 tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9266 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9267 InVals[i], ArgHasUses);
9270 // If this argument is unused then remember its value. It is used to generate
9271 // debugging information.
9272 bool isSwiftErrorArg =
9273 TLI->supportSwiftError() &&
9274 Arg.hasAttribute(Attribute::SwiftError);
9275 if (!ArgHasUses && !isSwiftErrorArg) {
9276 SDB->setUnusedArgValue(&Arg, InVals[i]);
9278 // Also remember any frame index for use in FastISel.
9279 if (FrameIndexSDNode *FI =
9280 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9281 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9284 for (unsigned Val = 0; Val != NumValues; ++Val) {
9285 EVT VT = ValueVTs[Val];
9286 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9287 F.getCallingConv(), VT);
9288 unsigned NumParts = TLI->getNumRegistersForCallingConv(
9289 *CurDAG->getContext(), F.getCallingConv(), VT);
9291 // Even an apparant 'unused' swifterror argument needs to be returned. So
9292 // we do generate a copy for it that can be used on return from the
9294 if (ArgHasUses || isSwiftErrorArg) {
9295 Optional<ISD::NodeType> AssertOp;
9296 if (Arg.hasAttribute(Attribute::SExt))
9297 AssertOp = ISD::AssertSext;
9298 else if (Arg.hasAttribute(Attribute::ZExt))
9299 AssertOp = ISD::AssertZext;
9301 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9302 PartVT, VT, nullptr,
9303 F.getCallingConv(), AssertOp));
9309 // We don't need to do anything else for unused arguments.
9310 if (ArgValues.empty())
9313 // Note down frame index.
9314 if (FrameIndexSDNode *FI =
9315 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9316 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9318 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9319 SDB->getCurSDLoc());
9321 SDB->setValue(&Arg, Res);
9322 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9323 // We want to associate the argument with the frame index, among
9324 // involved operands, that correspond to the lowest address. The
9325 // getCopyFromParts function, called earlier, is swapping the order of
9326 // the operands to BUILD_PAIR depending on endianness. The result of
9327 // that swapping is that the least significant bits of the argument will
9328 // be in the first operand of the BUILD_PAIR node, and the most
9329 // significant bits will be in the second operand.
9330 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9331 if (LoadSDNode *LNode =
9332 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9333 if (FrameIndexSDNode *FI =
9334 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9335 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9338 // Update the SwiftErrorVRegDefMap.
9339 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9340 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9341 if (TargetRegisterInfo::isVirtualRegister(Reg))
9342 FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
9343 FuncInfo->SwiftErrorArg, Reg);
9346 // If this argument is live outside of the entry block, insert a copy from
9347 // wherever we got it to the vreg that other BB's will reference it as.
9348 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
9349 // If we can, though, try to skip creating an unnecessary vreg.
9350 // FIXME: This isn't very clean... it would be nice to make this more
9351 // general. It's also subtly incompatible with the hacks FastISel
9353 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9354 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
9355 FuncInfo->ValueMap[&Arg] = Reg;
9359 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9360 FuncInfo->InitializeRegForValue(&Arg);
9361 SDB->CopyToExportRegsIfNeeded(&Arg);
9365 if (!Chains.empty()) {
9366 Chains.push_back(NewRoot);
9367 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9370 DAG.setRoot(NewRoot);
9372 assert(i == InVals.size() && "Argument register count mismatch!");
9374 // If any argument copy elisions occurred and we have debug info, update the
9375 // stale frame indices used in the dbg.declare variable info table.
9376 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9377 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9378 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9379 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9380 if (I != ArgCopyElisionFrameIndexMap.end())
9381 VI.Slot = I->second;
9385 // Finally, if the target has anything special to do, allow it to do so.
9386 EmitFunctionEntryCode();
9389 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
9390 /// ensure constants are generated when needed. Remember the virtual registers
9391 /// that need to be added to the Machine PHI nodes as input. We cannot just
9392 /// directly add them, because expansion might result in multiple MBB's for one
9393 /// BB. As such, the start of the BB might correspond to a different MBB than
9396 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9397 const Instruction *TI = LLVMBB->getTerminator();
9399 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
9401 // Check PHI nodes in successors that expect a value to be available from this
9403 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9404 const BasicBlock *SuccBB = TI->getSuccessor(succ);
9405 if (!isa<PHINode>(SuccBB->begin())) continue;
9406 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9408 // If this terminator has multiple identical successors (common for
9409 // switches), only handle each succ once.
9410 if (!SuccsHandled.insert(SuccMBB).second)
9413 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
9415 // At this point we know that there is a 1-1 correspondence between LLVM PHI
9416 // nodes and Machine PHI nodes, but the incoming operands have not been
9418 for (const PHINode &PN : SuccBB->phis()) {
9419 // Ignore dead phi's.
9424 if (PN.getType()->isEmptyTy())
9428 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
9430 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
9431 unsigned &RegOut = ConstantsOut[C];
9433 RegOut = FuncInfo.CreateRegs(C->getType());
9434 CopyValueToVirtualRegister(C, RegOut);
9438 DenseMap<const Value *, unsigned>::iterator I =
9439 FuncInfo.ValueMap.find(PHIOp);
9440 if (I != FuncInfo.ValueMap.end())
9443 assert(isa<AllocaInst>(PHIOp) &&
9444 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
9445 "Didn't codegen value into a register!??");
9446 Reg = FuncInfo.CreateRegs(PHIOp->getType());
9447 CopyValueToVirtualRegister(PHIOp, Reg);
9451 // Remember that this register needs to added to the machine PHI node as
9452 // the input for this MBB.
9453 SmallVector<EVT, 4> ValueVTs;
9454 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9455 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
9456 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
9457 EVT VT = ValueVTs[vti];
9458 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
9459 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
9460 FuncInfo.PHINodesToUpdate.push_back(
9461 std::make_pair(&*MBBI++, Reg + i));
9462 Reg += NumRegisters;
9467 ConstantsOut.clear();
9470 /// Add a successor MBB to ParentMBB< creating a new MachineBB for BB if SuccMBB
9473 SelectionDAGBuilder::StackProtectorDescriptor::
9474 AddSuccessorMBB(const BasicBlock *BB,
9475 MachineBasicBlock *ParentMBB,
9477 MachineBasicBlock *SuccMBB) {
9478 // If SuccBB has not been created yet, create it.
9480 MachineFunction *MF = ParentMBB->getParent();
9481 MachineFunction::iterator BBI(ParentMBB);
9482 SuccMBB = MF->CreateMachineBasicBlock(BB);
9483 MF->insert(++BBI, SuccMBB);
9485 // Add it as a successor of ParentMBB.
9486 ParentMBB->addSuccessor(
9487 SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
9491 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
9492 MachineFunction::iterator I(MBB);
9493 if (++I == FuncInfo.MF->end())
9498 /// During lowering new call nodes can be created (such as memset, etc.).
9499 /// Those will become new roots of the current DAG, but complications arise
9500 /// when they are tail calls. In such cases, the call lowering will update
9501 /// the root, but the builder still needs to know that a tail call has been
9502 /// lowered in order to avoid generating an additional return.
9503 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
9504 // If the node is null, we do have a tail call.
9505 if (MaybeTC.getNode() != nullptr)
9506 DAG.setRoot(MaybeTC);
9512 SelectionDAGBuilder::getJumpTableRange(const CaseClusterVector &Clusters,
9513 unsigned First, unsigned Last) const {
9514 assert(Last >= First);
9515 const APInt &LowCase = Clusters[First].Low->getValue();
9516 const APInt &HighCase = Clusters[Last].High->getValue();
9517 assert(LowCase.getBitWidth() == HighCase.getBitWidth());
9519 // FIXME: A range of consecutive cases has 100% density, but only requires one
9520 // comparison to lower. We should discriminate against such consecutive ranges
9523 return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
9526 uint64_t SelectionDAGBuilder::getJumpTableNumCases(
9527 const SmallVectorImpl<unsigned> &TotalCases, unsigned First,
9528 unsigned Last) const {
9529 assert(Last >= First);
9530 assert(TotalCases[Last] >= TotalCases[First]);
9532 TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
9536 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
9537 unsigned First, unsigned Last,
9538 const SwitchInst *SI,
9539 MachineBasicBlock *DefaultMBB,
9540 CaseCluster &JTCluster) {
9541 assert(First <= Last);
9543 auto Prob = BranchProbability::getZero();
9544 unsigned NumCmps = 0;
9545 std::vector<MachineBasicBlock*> Table;
9546 DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
9548 // Initialize probabilities in JTProbs.
9549 for (unsigned I = First; I <= Last; ++I)
9550 JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
9552 for (unsigned I = First; I <= Last; ++I) {
9553 assert(Clusters[I].Kind == CC_Range);
9554 Prob += Clusters[I].Prob;
9555 const APInt &Low = Clusters[I].Low->getValue();
9556 const APInt &High = Clusters[I].High->getValue();
9557 NumCmps += (Low == High) ? 1 : 2;
9559 // Fill the gap between this and the previous cluster.
9560 const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
9561 assert(PreviousHigh.slt(Low));
9562 uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
9563 for (uint64_t J = 0; J < Gap; J++)
9564 Table.push_back(DefaultMBB);
9566 uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
9567 for (uint64_t J = 0; J < ClusterSize; ++J)
9568 Table.push_back(Clusters[I].MBB);
9569 JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
9572 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9573 unsigned NumDests = JTProbs.size();
9574 if (TLI.isSuitableForBitTests(
9575 NumDests, NumCmps, Clusters[First].Low->getValue(),
9576 Clusters[Last].High->getValue(), DAG.getDataLayout())) {
9577 // Clusters[First..Last] should be lowered as bit tests instead.
9581 // Create the MBB that will load from and jump through the table.
9582 // Note: We create it here, but it's not inserted into the function yet.
9583 MachineFunction *CurMF = FuncInfo.MF;
9584 MachineBasicBlock *JumpTableMBB =
9585 CurMF->CreateMachineBasicBlock(SI->getParent());
9587 // Add successors. Note: use table order for determinism.
9588 SmallPtrSet<MachineBasicBlock *, 8> Done;
9589 for (MachineBasicBlock *Succ : Table) {
9590 if (Done.count(Succ))
9592 addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
9595 JumpTableMBB->normalizeSuccProbs();
9597 unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
9598 ->createJumpTableIndex(Table);
9600 // Set up the jump table info.
9601 JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
9602 JumpTableHeader JTH(Clusters[First].Low->getValue(),
9603 Clusters[Last].High->getValue(), SI->getCondition(),
9605 JTCases.emplace_back(std::move(JTH), std::move(JT));
9607 JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
9608 JTCases.size() - 1, Prob);
9612 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
9613 const SwitchInst *SI,
9614 MachineBasicBlock *DefaultMBB) {
9616 // Clusters must be non-empty, sorted, and only contain Range clusters.
9617 assert(!Clusters.empty());
9618 for (CaseCluster &C : Clusters)
9619 assert(C.Kind == CC_Range);
9620 for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
9621 assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
9624 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9625 if (!TLI.areJTsAllowed(SI->getParent()->getParent()))
9628 const int64_t N = Clusters.size();
9629 const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
9630 const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
9632 if (N < 2 || N < MinJumpTableEntries)
9635 // TotalCases[i]: Total nbr of cases in Clusters[0..i].
9636 SmallVector<unsigned, 8> TotalCases(N);
9637 for (unsigned i = 0; i < N; ++i) {
9638 const APInt &Hi = Clusters[i].High->getValue();
9639 const APInt &Lo = Clusters[i].Low->getValue();
9640 TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
9642 TotalCases[i] += TotalCases[i - 1];
9645 // Cheap case: the whole range may be suitable for jump table.
9646 uint64_t Range = getJumpTableRange(Clusters,0, N - 1);
9647 uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
9648 assert(NumCases < UINT64_MAX / 100);
9649 assert(Range >= NumCases);
9650 if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9651 CaseCluster JTCluster;
9652 if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
9653 Clusters[0] = JTCluster;
9659 // The algorithm below is not suitable for -O0.
9660 if (TM.getOptLevel() == CodeGenOpt::None)
9663 // Split Clusters into minimum number of dense partitions. The algorithm uses
9664 // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
9665 // for the Case Statement'" (1994), but builds the MinPartitions array in
9666 // reverse order to make it easier to reconstruct the partitions in ascending
9667 // order. In the choice between two optimal partitionings, it picks the one
9668 // which yields more jump tables.
9670 // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
9671 SmallVector<unsigned, 8> MinPartitions(N);
9672 // LastElement[i] is the last element of the partition starting at i.
9673 SmallVector<unsigned, 8> LastElement(N);
9674 // PartitionsScore[i] is used to break ties when choosing between two
9675 // partitionings resulting in the same number of partitions.
9676 SmallVector<unsigned, 8> PartitionsScore(N);
9677 // For PartitionsScore, a small number of comparisons is considered as good as
9678 // a jump table and a single comparison is considered better than a jump
9680 enum PartitionScores : unsigned {
9687 // Base case: There is only one way to partition Clusters[N-1].
9688 MinPartitions[N - 1] = 1;
9689 LastElement[N - 1] = N - 1;
9690 PartitionsScore[N - 1] = PartitionScores::SingleCase;
9692 // Note: loop indexes are signed to avoid underflow.
9693 for (int64_t i = N - 2; i >= 0; i--) {
9694 // Find optimal partitioning of Clusters[i..N-1].
9695 // Baseline: Put Clusters[i] into a partition on its own.
9696 MinPartitions[i] = MinPartitions[i + 1] + 1;
9698 PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
9700 // Search for a solution that results in fewer partitions.
9701 for (int64_t j = N - 1; j > i; j--) {
9702 // Try building a partition from Clusters[i..j].
9703 uint64_t Range = getJumpTableRange(Clusters, i, j);
9704 uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j);
9705 assert(NumCases < UINT64_MAX / 100);
9706 assert(Range >= NumCases);
9707 if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9708 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9709 unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
9710 int64_t NumEntries = j - i + 1;
9712 if (NumEntries == 1)
9713 Score += PartitionScores::SingleCase;
9714 else if (NumEntries <= SmallNumberOfEntries)
9715 Score += PartitionScores::FewCases;
9716 else if (NumEntries >= MinJumpTableEntries)
9717 Score += PartitionScores::Table;
9719 // If this leads to fewer partitions, or to the same number of
9720 // partitions with better score, it is a better partitioning.
9721 if (NumPartitions < MinPartitions[i] ||
9722 (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
9723 MinPartitions[i] = NumPartitions;
9725 PartitionsScore[i] = Score;
9731 // Iterate over the partitions, replacing some with jump tables in-place.
9732 unsigned DstIndex = 0;
9733 for (unsigned First = 0, Last; First < N; First = Last + 1) {
9734 Last = LastElement[First];
9735 assert(Last >= First);
9736 assert(DstIndex <= First);
9737 unsigned NumClusters = Last - First + 1;
9739 CaseCluster JTCluster;
9740 if (NumClusters >= MinJumpTableEntries &&
9741 buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
9742 Clusters[DstIndex++] = JTCluster;
9744 for (unsigned I = First; I <= Last; ++I)
9745 std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
9748 Clusters.resize(DstIndex);
9751 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
9752 unsigned First, unsigned Last,
9753 const SwitchInst *SI,
9754 CaseCluster &BTCluster) {
9755 assert(First <= Last);
9759 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9760 unsigned NumCmps = 0;
9761 for (int64_t I = First; I <= Last; ++I) {
9762 assert(Clusters[I].Kind == CC_Range);
9763 Dests.set(Clusters[I].MBB->getNumber());
9764 NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
9766 unsigned NumDests = Dests.count();
9768 APInt Low = Clusters[First].Low->getValue();
9769 APInt High = Clusters[Last].High->getValue();
9770 assert(Low.slt(High));
9772 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9773 const DataLayout &DL = DAG.getDataLayout();
9774 if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL))
9780 const int BitWidth = TLI.getPointerTy(DL).getSizeInBits();
9781 assert(TLI.rangeFitsInWord(Low, High, DL) &&
9782 "Case range must fit in bit mask!");
9784 // Check if the clusters cover a contiguous range such that no value in the
9785 // range will jump to the default statement.
9786 bool ContiguousRange = true;
9787 for (int64_t I = First + 1; I <= Last; ++I) {
9788 if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
9789 ContiguousRange = false;
9794 if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
9795 // Optimize the case where all the case values fit in a word without having
9796 // to subtract minValue. In this case, we can optimize away the subtraction.
9797 LowBound = APInt::getNullValue(Low.getBitWidth());
9799 ContiguousRange = false;
9802 CmpRange = High - Low;
9806 auto TotalProb = BranchProbability::getZero();
9807 for (unsigned i = First; i <= Last; ++i) {
9808 // Find the CaseBits for this destination.
9810 for (j = 0; j < CBV.size(); ++j)
9811 if (CBV[j].BB == Clusters[i].MBB)
9813 if (j == CBV.size())
9815 CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
9816 CaseBits *CB = &CBV[j];
9818 // Update Mask, Bits and ExtraProb.
9819 uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
9820 uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
9821 assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
9822 CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
9823 CB->Bits += Hi - Lo + 1;
9824 CB->ExtraProb += Clusters[i].Prob;
9825 TotalProb += Clusters[i].Prob;
9829 llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
9830 // Sort by probability first, number of bits second, bit mask third.
9831 if (a.ExtraProb != b.ExtraProb)
9832 return a.ExtraProb > b.ExtraProb;
9833 if (a.Bits != b.Bits)
9834 return a.Bits > b.Bits;
9835 return a.Mask < b.Mask;
9838 for (auto &CB : CBV) {
9839 MachineBasicBlock *BitTestBB =
9840 FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
9841 BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
9843 BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
9844 SI->getCondition(), -1U, MVT::Other, false,
9845 ContiguousRange, nullptr, nullptr, std::move(BTI),
9848 BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
9849 BitTestCases.size() - 1, TotalProb);
9853 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
9854 const SwitchInst *SI) {
9855 // Partition Clusters into as few subsets as possible, where each subset has a
9856 // range that fits in a machine word and has <= 3 unique destinations.
9859 // Clusters must be sorted and contain Range or JumpTable clusters.
9860 assert(!Clusters.empty());
9861 assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
9862 for (const CaseCluster &C : Clusters)
9863 assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
9864 for (unsigned i = 1; i < Clusters.size(); ++i)
9865 assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
9868 // The algorithm below is not suitable for -O0.
9869 if (TM.getOptLevel() == CodeGenOpt::None)
9872 // If target does not have legal shift left, do not emit bit tests at all.
9873 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9874 const DataLayout &DL = DAG.getDataLayout();
9876 EVT PTy = TLI.getPointerTy(DL);
9877 if (!TLI.isOperationLegal(ISD::SHL, PTy))
9880 int BitWidth = PTy.getSizeInBits();
9881 const int64_t N = Clusters.size();
9883 // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
9884 SmallVector<unsigned, 8> MinPartitions(N);
9885 // LastElement[i] is the last element of the partition starting at i.
9886 SmallVector<unsigned, 8> LastElement(N);
9888 // FIXME: This might not be the best algorithm for finding bit test clusters.
9890 // Base case: There is only one way to partition Clusters[N-1].
9891 MinPartitions[N - 1] = 1;
9892 LastElement[N - 1] = N - 1;
9894 // Note: loop indexes are signed to avoid underflow.
9895 for (int64_t i = N - 2; i >= 0; --i) {
9896 // Find optimal partitioning of Clusters[i..N-1].
9897 // Baseline: Put Clusters[i] into a partition on its own.
9898 MinPartitions[i] = MinPartitions[i + 1] + 1;
9901 // Search for a solution that results in fewer partitions.
9902 // Note: the search is limited by BitWidth, reducing time complexity.
9903 for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9904 // Try building a partition from Clusters[i..j].
9907 if (!TLI.rangeFitsInWord(Clusters[i].Low->getValue(),
9908 Clusters[j].High->getValue(), DL))
9911 // Check nbr of destinations and cluster types.
9912 // FIXME: This works, but doesn't seem very efficient.
9913 bool RangesOnly = true;
9914 BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9915 for (int64_t k = i; k <= j; k++) {
9916 if (Clusters[k].Kind != CC_Range) {
9920 Dests.set(Clusters[k].MBB->getNumber());
9922 if (!RangesOnly || Dests.count() > 3)
9925 // Check if it's a better partition.
9926 unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9927 if (NumPartitions < MinPartitions[i]) {
9928 // Found a better partition.
9929 MinPartitions[i] = NumPartitions;
9935 // Iterate over the partitions, replacing with bit-test clusters in-place.
9936 unsigned DstIndex = 0;
9937 for (unsigned First = 0, Last; First < N; First = Last + 1) {
9938 Last = LastElement[First];
9939 assert(First <= Last);
9940 assert(DstIndex <= First);
9942 CaseCluster BitTestCluster;
9943 if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9944 Clusters[DstIndex++] = BitTestCluster;
9946 size_t NumClusters = Last - First + 1;
9947 std::memmove(&Clusters[DstIndex], &Clusters[First],
9948 sizeof(Clusters[0]) * NumClusters);
9949 DstIndex += NumClusters;
9952 Clusters.resize(DstIndex);
9955 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9956 MachineBasicBlock *SwitchMBB,
9957 MachineBasicBlock *DefaultMBB) {
9958 MachineFunction *CurMF = FuncInfo.MF;
9959 MachineBasicBlock *NextMBB = nullptr;
9960 MachineFunction::iterator BBI(W.MBB);
9961 if (++BBI != FuncInfo.MF->end())
9964 unsigned Size = W.LastCluster - W.FirstCluster + 1;
9966 BranchProbabilityInfo *BPI = FuncInfo.BPI;
9968 if (Size == 2 && W.MBB == SwitchMBB) {
9969 // If any two of the cases has the same destination, and if one value
9970 // is the same as the other, but has one bit unset that the other has set,
9971 // use bit manipulation to do two compares at once. For example:
9972 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9973 // TODO: This could be extended to merge any 2 cases in switches with 3
9975 // TODO: Handle cases where W.CaseBB != SwitchBB.
9976 CaseCluster &Small = *W.FirstCluster;
9977 CaseCluster &Big = *W.LastCluster;
9979 if (Small.Low == Small.High && Big.Low == Big.High &&
9980 Small.MBB == Big.MBB) {
9981 const APInt &SmallValue = Small.Low->getValue();
9982 const APInt &BigValue = Big.Low->getValue();
9984 // Check that there is only one bit different.
9985 APInt CommonBit = BigValue ^ SmallValue;
9986 if (CommonBit.isPowerOf2()) {
9987 SDValue CondLHS = getValue(Cond);
9988 EVT VT = CondLHS.getValueType();
9989 SDLoc DL = getCurSDLoc();
9991 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9992 DAG.getConstant(CommonBit, DL, VT));
9993 SDValue Cond = DAG.getSetCC(
9994 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9997 // Update successor info.
9998 // Both Small and Big will jump to Small.BB, so we sum up the
10000 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
10002 addSuccessorWithProb(
10003 SwitchMBB, DefaultMBB,
10004 // The default destination is the first successor in IR.
10005 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
10007 addSuccessorWithProb(SwitchMBB, DefaultMBB);
10009 // Insert the true branch.
10011 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
10012 DAG.getBasicBlock(Small.MBB));
10013 // Insert the false branch.
10014 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10015 DAG.getBasicBlock(DefaultMBB));
10017 DAG.setRoot(BrCond);
10023 if (TM.getOptLevel() != CodeGenOpt::None) {
10024 // Here, we order cases by probability so the most likely case will be
10025 // checked first. However, two clusters can have the same probability in
10026 // which case their relative ordering is non-deterministic. So we use Low
10027 // as a tie-breaker as clusters are guaranteed to never overlap.
10028 llvm::sort(W.FirstCluster, W.LastCluster + 1,
10029 [](const CaseCluster &a, const CaseCluster &b) {
10030 return a.Prob != b.Prob ?
10032 a.Low->getValue().slt(b.Low->getValue());
10035 // Rearrange the case blocks so that the last one falls through if possible
10036 // without changing the order of probabilities.
10037 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
10039 if (I->Prob > W.LastCluster->Prob)
10041 if (I->Kind == CC_Range && I->MBB == NextMBB) {
10042 std::swap(*I, *W.LastCluster);
10048 // Compute total probability.
10049 BranchProbability DefaultProb = W.DefaultProb;
10050 BranchProbability UnhandledProbs = DefaultProb;
10051 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10052 UnhandledProbs += I->Prob;
10054 MachineBasicBlock *CurMBB = W.MBB;
10055 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10056 MachineBasicBlock *Fallthrough;
10057 if (I == W.LastCluster) {
10058 // For the last cluster, fall through to the default destination.
10059 Fallthrough = DefaultMBB;
10061 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10062 CurMF->insert(BBI, Fallthrough);
10063 // Put Cond in a virtual register to make it available from the new blocks.
10064 ExportFromCurrentBlock(Cond);
10066 UnhandledProbs -= I->Prob;
10069 case CC_JumpTable: {
10070 // FIXME: Optimize away range check based on pivot comparisons.
10071 JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
10072 JumpTable *JT = &JTCases[I->JTCasesIndex].second;
10074 // The jump block hasn't been inserted yet; insert it here.
10075 MachineBasicBlock *JumpMBB = JT->MBB;
10076 CurMF->insert(BBI, JumpMBB);
10078 auto JumpProb = I->Prob;
10079 auto FallthroughProb = UnhandledProbs;
10081 // If the default statement is a target of the jump table, we evenly
10082 // distribute the default probability to successors of CurMBB. Also
10083 // update the probability on the edge from JumpMBB to Fallthrough.
10084 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
10085 SE = JumpMBB->succ_end();
10087 if (*SI == DefaultMBB) {
10088 JumpProb += DefaultProb / 2;
10089 FallthroughProb -= DefaultProb / 2;
10090 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
10091 JumpMBB->normalizeSuccProbs();
10096 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10097 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10098 CurMBB->normalizeSuccProbs();
10100 // The jump table header will be inserted in our current block, do the
10101 // range check, and fall through to our fallthrough block.
10102 JTH->HeaderBB = CurMBB;
10103 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10105 // If we're in the right place, emit the jump table header right now.
10106 if (CurMBB == SwitchMBB) {
10107 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10108 JTH->Emitted = true;
10112 case CC_BitTests: {
10113 // FIXME: Optimize away range check based on pivot comparisons.
10114 BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
10116 // The bit test blocks haven't been inserted yet; insert them here.
10117 for (BitTestCase &BTC : BTB->Cases)
10118 CurMF->insert(BBI, BTC.ThisBB);
10120 // Fill in fields of the BitTestBlock.
10121 BTB->Parent = CurMBB;
10122 BTB->Default = Fallthrough;
10124 BTB->DefaultProb = UnhandledProbs;
10125 // If the cases in bit test don't form a contiguous range, we evenly
10126 // distribute the probability on the edge to Fallthrough to two
10127 // successors of CurMBB.
10128 if (!BTB->ContiguousRange) {
10129 BTB->Prob += DefaultProb / 2;
10130 BTB->DefaultProb -= DefaultProb / 2;
10133 // If we're in the right place, emit the bit test header right now.
10134 if (CurMBB == SwitchMBB) {
10135 visitBitTestHeader(*BTB, SwitchMBB);
10136 BTB->Emitted = true;
10141 const Value *RHS, *LHS, *MHS;
10143 if (I->Low == I->High) {
10144 // Check Cond == I->Low.
10150 // Check I->Low <= Cond <= I->High.
10157 // The false probability is the sum of all unhandled cases.
10158 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10159 getCurSDLoc(), I->Prob, UnhandledProbs);
10161 if (CurMBB == SwitchMBB)
10162 visitSwitchCase(CB, SwitchMBB);
10164 SwitchCases.push_back(CB);
10169 CurMBB = Fallthrough;
10173 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10174 CaseClusterIt First,
10175 CaseClusterIt Last) {
10176 return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10177 if (X.Prob != CC.Prob)
10178 return X.Prob > CC.Prob;
10180 // Ties are broken by comparing the case value.
10181 return X.Low->getValue().slt(CC.Low->getValue());
10185 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10186 const SwitchWorkListItem &W,
10188 MachineBasicBlock *SwitchMBB) {
10189 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10190 "Clusters not sorted?");
10192 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10194 // Balance the tree based on branch probabilities to create a near-optimal (in
10195 // terms of search time given key frequency) binary search tree. See e.g. Kurt
10196 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10197 CaseClusterIt LastLeft = W.FirstCluster;
10198 CaseClusterIt FirstRight = W.LastCluster;
10199 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10200 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10202 // Move LastLeft and FirstRight towards each other from opposite directions to
10203 // find a partitioning of the clusters which balances the probability on both
10204 // sides. If LeftProb and RightProb are equal, alternate which side is
10205 // taken to ensure 0-probability nodes are distributed evenly.
10207 while (LastLeft + 1 < FirstRight) {
10208 if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10209 LeftProb += (++LastLeft)->Prob;
10211 RightProb += (--FirstRight)->Prob;
10216 // Our binary search tree differs from a typical BST in that ours can have up
10217 // to three values in each leaf. The pivot selection above doesn't take that
10218 // into account, which means the tree might require more nodes and be less
10219 // efficient. We compensate for this here.
10221 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10222 unsigned NumRight = W.LastCluster - FirstRight + 1;
10224 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10225 // If one side has less than 3 clusters, and the other has more than 3,
10226 // consider taking a cluster from the other side.
10228 if (NumLeft < NumRight) {
10229 // Consider moving the first cluster on the right to the left side.
10230 CaseCluster &CC = *FirstRight;
10231 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10232 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10233 if (LeftSideRank <= RightSideRank) {
10234 // Moving the cluster to the left does not demote it.
10240 assert(NumRight < NumLeft);
10241 // Consider moving the last element on the left to the right side.
10242 CaseCluster &CC = *LastLeft;
10243 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10244 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10245 if (RightSideRank <= LeftSideRank) {
10246 // Moving the cluster to the right does not demot it.
10256 assert(LastLeft + 1 == FirstRight);
10257 assert(LastLeft >= W.FirstCluster);
10258 assert(FirstRight <= W.LastCluster);
10260 // Use the first element on the right as pivot since we will make less-than
10261 // comparisons against it.
10262 CaseClusterIt PivotCluster = FirstRight;
10263 assert(PivotCluster > W.FirstCluster);
10264 assert(PivotCluster <= W.LastCluster);
10266 CaseClusterIt FirstLeft = W.FirstCluster;
10267 CaseClusterIt LastRight = W.LastCluster;
10269 const ConstantInt *Pivot = PivotCluster->Low;
10271 // New blocks will be inserted immediately after the current one.
10272 MachineFunction::iterator BBI(W.MBB);
10275 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10276 // we can branch to its destination directly if it's squeezed exactly in
10277 // between the known lower bound and Pivot - 1.
10278 MachineBasicBlock *LeftMBB;
10279 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10280 FirstLeft->Low == W.GE &&
10281 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10282 LeftMBB = FirstLeft->MBB;
10284 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10285 FuncInfo.MF->insert(BBI, LeftMBB);
10286 WorkList.push_back(
10287 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10288 // Put Cond in a virtual register to make it available from the new blocks.
10289 ExportFromCurrentBlock(Cond);
10292 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10293 // single cluster, RHS.Low == Pivot, and we can branch to its destination
10294 // directly if RHS.High equals the current upper bound.
10295 MachineBasicBlock *RightMBB;
10296 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10297 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10298 RightMBB = FirstRight->MBB;
10300 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10301 FuncInfo.MF->insert(BBI, RightMBB);
10302 WorkList.push_back(
10303 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10304 // Put Cond in a virtual register to make it available from the new blocks.
10305 ExportFromCurrentBlock(Cond);
10308 // Create the CaseBlock record that will be used to lower the branch.
10309 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10310 getCurSDLoc(), LeftProb, RightProb);
10312 if (W.MBB == SwitchMBB)
10313 visitSwitchCase(CB, SwitchMBB);
10315 SwitchCases.push_back(CB);
10318 // Scale CaseProb after peeling a case with the probablity of PeeledCaseProb
10319 // from the swith statement.
10320 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
10321 BranchProbability PeeledCaseProb) {
10322 if (PeeledCaseProb == BranchProbability::getOne())
10323 return BranchProbability::getZero();
10324 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10326 uint32_t Numerator = CaseProb.getNumerator();
10327 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
10328 return BranchProbability(Numerator, std::max(Numerator, Denominator));
10331 // Try to peel the top probability case if it exceeds the threshold.
10332 // Return current MachineBasicBlock for the switch statement if the peeling
10334 // If the peeling is performed, return the newly created MachineBasicBlock
10335 // for the peeled switch statement. Also update Clusters to remove the peeled
10336 // case. PeeledCaseProb is the BranchProbability for the peeled case.
10337 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
10338 const SwitchInst &SI, CaseClusterVector &Clusters,
10339 BranchProbability &PeeledCaseProb) {
10340 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10341 // Don't perform if there is only one cluster or optimizing for size.
10342 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
10343 TM.getOptLevel() == CodeGenOpt::None ||
10344 SwitchMBB->getParent()->getFunction().optForMinSize())
10347 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
10348 unsigned PeeledCaseIndex = 0;
10349 bool SwitchPeeled = false;
10350 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
10351 CaseCluster &CC = Clusters[Index];
10352 if (CC.Prob < TopCaseProb)
10354 TopCaseProb = CC.Prob;
10355 PeeledCaseIndex = Index;
10356 SwitchPeeled = true;
10361 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
10362 << TopCaseProb << "\n");
10364 // Record the MBB for the peeled switch statement.
10365 MachineFunction::iterator BBI(SwitchMBB);
10367 MachineBasicBlock *PeeledSwitchMBB =
10368 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
10369 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
10371 ExportFromCurrentBlock(SI.getCondition());
10372 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10373 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10374 nullptr, nullptr, TopCaseProb.getCompl()};
10375 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
10377 Clusters.erase(PeeledCaseIt);
10378 for (CaseCluster &CC : Clusters) {
10380 dbgs() << "Scale the probablity for one cluster, before scaling: "
10381 << CC.Prob << "\n");
10382 CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
10383 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
10385 PeeledCaseProb = TopCaseProb;
10386 return PeeledSwitchMBB;
10389 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
10390 // Extract cases from the switch.
10391 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10392 CaseClusterVector Clusters;
10393 Clusters.reserve(SI.getNumCases());
10394 for (auto I : SI.cases()) {
10395 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
10396 const ConstantInt *CaseVal = I.getCaseValue();
10397 BranchProbability Prob =
10398 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
10399 : BranchProbability(1, SI.getNumCases() + 1);
10400 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
10403 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
10405 // Cluster adjacent cases with the same destination. We do this at all
10406 // optimization levels because it's cheap to do and will make codegen faster
10407 // if there are many clusters.
10408 sortAndRangeify(Clusters);
10410 if (TM.getOptLevel() != CodeGenOpt::None) {
10411 // Replace an unreachable default with the most popular destination.
10412 // FIXME: Exploit unreachable default more aggressively.
10413 bool UnreachableDefault =
10414 isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
10415 if (UnreachableDefault && !Clusters.empty()) {
10416 DenseMap<const BasicBlock *, unsigned> Popularity;
10417 unsigned MaxPop = 0;
10418 const BasicBlock *MaxBB = nullptr;
10419 for (auto I : SI.cases()) {
10420 const BasicBlock *BB = I.getCaseSuccessor();
10421 if (++Popularity[BB] > MaxPop) {
10422 MaxPop = Popularity[BB];
10426 // Set new default.
10427 assert(MaxPop > 0 && MaxBB);
10428 DefaultMBB = FuncInfo.MBBMap[MaxBB];
10430 // Remove cases that were pointing to the destination that is now the
10432 CaseClusterVector New;
10433 New.reserve(Clusters.size());
10434 for (CaseCluster &CC : Clusters) {
10435 if (CC.MBB != DefaultMBB)
10438 Clusters = std::move(New);
10442 // The branch probablity of the peeled case.
10443 BranchProbability PeeledCaseProb = BranchProbability::getZero();
10444 MachineBasicBlock *PeeledSwitchMBB =
10445 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10447 // If there is only the default destination, jump there directly.
10448 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10449 if (Clusters.empty()) {
10450 assert(PeeledSwitchMBB == SwitchMBB);
10451 SwitchMBB->addSuccessor(DefaultMBB);
10452 if (DefaultMBB != NextBlock(SwitchMBB)) {
10453 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
10454 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
10459 findJumpTables(Clusters, &SI, DefaultMBB);
10460 findBitTestClusters(Clusters, &SI);
10463 dbgs() << "Case clusters: ";
10464 for (const CaseCluster &C : Clusters) {
10465 if (C.Kind == CC_JumpTable)
10467 if (C.Kind == CC_BitTests)
10470 C.Low->getValue().print(dbgs(), true);
10471 if (C.Low != C.High) {
10473 C.High->getValue().print(dbgs(), true);
10480 assert(!Clusters.empty());
10481 SwitchWorkList WorkList;
10482 CaseClusterIt First = Clusters.begin();
10483 CaseClusterIt Last = Clusters.end() - 1;
10484 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
10485 // Scale the branchprobability for DefaultMBB if the peel occurs and
10486 // DefaultMBB is not replaced.
10487 if (PeeledCaseProb != BranchProbability::getZero() &&
10488 DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
10489 DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
10490 WorkList.push_back(
10491 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
10493 while (!WorkList.empty()) {
10494 SwitchWorkListItem W = WorkList.back();
10495 WorkList.pop_back();
10496 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10498 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10499 !DefaultMBB->getParent()->getFunction().optForMinSize()) {
10500 // For optimized builds, lower large range as a balanced binary tree.
10501 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
10505 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);