//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//
17 #include "AMDGPUIntrinsicInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/Analysis/DivergenceAnalysis.h"
22 #include "llvm/CodeGen/Passes.h"
23 #include "llvm/IR/Attributes.h"
24 #include "llvm/IR/BasicBlock.h"
25 #include "llvm/IR/Constants.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/InstrTypes.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/InstVisitor.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/IR/Intrinsics.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/LLVMContext.h"
36 #include "llvm/IR/Operator.h"
37 #include "llvm/IR/Type.h"
38 #include "llvm/IR/Value.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/Casting.h"
44 #define DEBUG_TYPE "amdgpu-codegenprepare"

class AMDGPUCodeGenPrepare : public FunctionPass,
                             public InstVisitor<AMDGPUCodeGenPrepare, bool> {
  const GCNTargetMachine *TM;
  const SISubtarget *ST = nullptr;
  DivergenceAnalysis *DA = nullptr;
  Module *Mod = nullptr;
  bool HasUnsafeFPMath = false;

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation, false
  /// otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
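  /// (Illustrative examples, not an exhaustive list: i8 and i16 qualify, while
  /// i1 and i32 do not; a vector type follows its element type unless the
  /// subtarget has packed 16 bit instructions.)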
  bool needsPromotionToI32(const Type *T) const;

  /// \brief Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the equivalent 32 bit binary operation,
  /// and truncating the result of the 32 bit binary operation back to \p I's
  /// original type. Division operations are not promoted.
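  ///
  /// For illustration only (this example is not taken from the original
  /// sources), a uniform i16 addition
  ///   %r = add i16 %a, %b
  /// would be rewritten roughly as
  ///   %a32 = zext i16 %a to i32
  ///   %b32 = zext i16 %b to i32
  ///   %r32 = add nuw nsw i32 %a32, %b32
  ///   %r   = trunc i32 %r32 to i16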
  ///
  /// \returns True if \p I is promoted to the equivalent 32 bit binary
  /// operation, false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// \brief Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, and replacing \p I with the 32 bit 'icmp' operation.
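  ///
  /// For illustration only (this example is not taken from the original
  /// sources), a uniform 16 bit signed compare
  ///   %c = icmp slt i16 %a, %b
  /// would be rewritten roughly as
  ///   %a32 = sext i16 %a to i32
  ///   %b32 = sext i16 %b to i32
  ///   %c   = icmp slt i32 %a32, %b32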
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// \brief Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by sign or zero extending operands
  /// to 32 bits, replacing \p I with the 32 bit 'select' operation, and
  /// truncating the result of the 32 bit 'select' operation back to \p I's
  /// original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// \brief Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal to 16. Promotion is done by zero extending the operand to 32
  /// bits, replacing \p I with the 32 bit 'bitreverse' intrinsic, shifting the
  /// result of the 32 bit 'bitreverse' intrinsic to the right with zero fill
  /// (the shift amount is 32 minus \p I's base element bit width), and
  /// truncating the result of the shift operation back to \p I's original type.
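  ///
  /// For illustration only (a sketch, not taken from the original sources), a
  /// uniform i16 bitreverse
  ///   %r = call i16 @llvm.bitreverse.i16(i16 %a)
  /// would be rewritten roughly as
  ///   %e = zext i16 %a to i32
  ///   %b = call i32 @llvm.bitreverse.i32(i32 %e)
  ///   %s = lshr i32 %b, 16        ; 32 minus the 16 bit width of the original
  ///   %r = trunc i32 %s to i16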
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

public:
  static char ID;

  AMDGPUCodeGenPrepare(const TargetMachine *TM = nullptr) :
    FunctionPass(ID), TM(static_cast<const GCNTargetMachine *>(TM)) {}

  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DivergenceAnalysis>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

unsigned AMDGPUCodeGenPrepare::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepare::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return VectorType::get(B.getInt32Ty(), cast<VectorType>(T)->getNumElements());
}

bool AMDGPUCodeGenPrepare::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepare::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepare::needsPromotionToI32(const Type *T) const {
  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

// Return true if the op promoted to i32 should have nsw set.
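// (Informal note added for clarity: the original operands are at most 16 bits
// wide, so after extension a 32 bit 'add', for example, cannot overflow; that
// is what makes it safe to put these flags on the promoted operation.)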
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }

  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepare::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv) {
  const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
  if (!CNum)
    return false;
  // Reciprocal f32 is handled separately without denormals.
  return UnsafeDiv || CNum->isExactlyValue(+1.0);
}

// Insert an intrinsic for fast fdiv for safe math situations where we can
// reduce precision. Leave fdiv for situations where the generic node is
// expected to be optimized.
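//
// For illustration only (a sketch; the exact conditions depend on the !fpmath
// metadata, fast-math flags, and subtarget denormal support), a divide such as
//   %d = fdiv float %x, %y, !fpmath !0   ; !0 = !{float 2.500000e+00}
// may be replaced with a call to the fast-division intrinsic:
//   %d = call float @llvm.amdgcn.fdiv.fast(float %x, float %y)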

bool AMDGPUCodeGenPrepare::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType();

  if (!Ty->getScalarType()->isFloatTy())
    return false;

  MDNode *FPMath = FDiv.getMetadata(LLVMContext::MD_fpmath);
  if (!FPMath)
    return false;

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  float ULP = FPOp->getFPAccuracy();
  if (ULP < 2.5f)
    return false;

  FastMathFlags FMF = FPOp->getFastMathFlags();
  bool UnsafeDiv = HasUnsafeFPMath || FMF.unsafeAlgebra() ||
                   FMF.allowReciprocal();
  if (ST->hasFP32Denormals() && !UnsafeDiv)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()), FPMath);
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  const AMDGPUIntrinsicInfo *II = TM->getIntrinsicInfo();
  Function *Decl
    = II->getDeclaration(Mod, AMDGPUIntrinsic::amdgcn_fdiv_fast, {});

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;

  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
    NewFDiv = UndefValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is
    // partially constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      Value *NewElt;

      if (shouldKeepFDivF32(NumEltI, UnsafeDiv)) {
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);
      } else {
        NewElt = Builder.CreateCall(Decl, { NumEltI, DenEltI });
      }

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else {
    if (!shouldKeepFDivF32(Num, UnsafeDiv))
      NewFDiv = Builder.CreateCall(Decl, { Num, Den });
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsString() == "true";
}

bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
  bool Changed = false;
  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitICmpInst(ICmpInst &I) {
  bool Changed = false;
  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitSelectInst(SelectInst &I) {
  bool Changed = false;
  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepare::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;
  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      DA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Mod = &M;
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  ST = &TM->getSubtarget<SISubtarget>(F);
  DA = &getAnalysis<DivergenceAnalysis>();
  HasUnsafeFPMath = hasUnsafeFPMath(F);

  bool MadeChange = false;

  for (BasicBlock &BB : F) {
    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; I = Next) {
      Next = std::next(I);
      MadeChange |= visit(*I);
    }
  }

  return MadeChange;
}

INITIALIZE_TM_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                         "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
INITIALIZE_TM_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                       "AMDGPU IR optimizations", false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass(const GCNTargetMachine *TM) {
  return new AMDGPUCodeGenPrepare(TM);
}