//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
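///
/// For example (illustrative): 'shl nuw i32 %X, 2' decomposes to %X with
/// Scale=4 and Offset=0, and 'add nuw (mul nuw i32 %X, 4), 8' decomposes to
/// %X with Scale=4 and Offset=8.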
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale  = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
          decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}
/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
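///
/// For example (a sketch; subject to the alignment and use checks below):
///   %a = alloca i32, i32 4
///   %b = bitcast i32* %a to i64*
/// can be promoted so the alloca produces the cast type directly:
///   %b = alloca i64, i32 2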
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // This optimisation does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case where the allocated type is scalable and the
  // cast type is not this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}
/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
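///
/// For instance (a sketch): when truncating to i8, the expression
/// 'add i32 (zext i8 %a to i32), (zext i8 %b to i32)' is rebuilt as simply
/// 'add i8 %a, %b', because each zext operand is replaced by its source.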
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}
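
// An illustrative case of the pair elimination below: a 'zext i8 %x to i32'
// followed by a 'trunc i32 to i16' can be replaced by a single
// 'zext i8 %x to i16'.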
Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}
/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}
/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;
  return false;
}

/// Filter out values that we can not evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}
/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
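///
/// For example (illustrative, assuming the intermediate values are
/// single-use):
///   trunc (add (zext i8 %x to i32), (zext i8 %y to i32)) to i8
/// can be computed directly as 'add i8 %x, %y', since both operands are free
/// to evaluate at i8.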
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // inrange amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to sign bit of the truncate type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}
/// Rotate left/right may occur in a wider type than necessary because of type
/// promotion rules. Try to narrow the inputs and convert to funnel shift.
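///
/// For example (a sketch; the exact intrinsic depends on which shift pair is
/// matched): if %w = zext i8 %x to i32 and the source rotates %w by %amt
/// within the low 8 bits, the whole pattern can narrow to
///   call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 trunc(%amt))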
Instruction *InstCombinerImpl::narrowRotate(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts with the same shifted operand:
  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
  Value *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
    return nullptr;

  Value *ShVal, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
    return nullptr;

  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  if (ShiftOpcode0 == ShiftOpcode1)
    return nullptr;

  // Match the shift amount operands for a rotate pattern. This always matches
  // a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal, L) | (lshr ShVal, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool SubIsOnLHS = false;
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    SubIsOnLHS = true;
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}
/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }
  default: break;
  }

  if (Instruction *NarrowOr = narrowRotate(Trunc))
    return NarrowOr;

  return nullptr;
}
/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      is_splat(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getShuffleMask());
  }

  return nullptr;
}
/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}
Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
  if (match(Src, m_OneUse(m_Shr(m_Trunc(m_Value(A)), m_Constant(C))))) {
    unsigned MaxShiftAmt = SrcWidth - DestWidth;

    // If the shift is small enough, all zero/sign bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      auto *OldShift = cast<Instruction>(Src);
      bool IsExact = OldShift->isExact();
      auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
      ShAmt = Constant::mergeUndefsWith(ShAmt, C);
      Value *Shift =
          OldShift->getOpcode() == Instruction::AShr
              ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
              : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
      return CastInst::CreateTruncOrBitCast(Shift, DestTy);
    }
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<FixedVectorType>(VecOp->getType());
    unsigned VecNumElts = VecOpTy->getNumElements();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecNumElts * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo = FixedVectorType::get(DestTy, BitCastNumElts);
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  return nullptr;
}
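
/// Transform (zext icmp) to bitwise / integer operations in order to
/// eliminate the icmp. For example (illustrative):
///   zext (icmp slt i32 %x, 0) to i32  -->  lshr i32 %x, 31
/// When DoTransform is false, this only queries whether the fold would apply.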
Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                                 bool DoTransform) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}
/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}
Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversion will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                               APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved. We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // according elimination.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}
/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
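/// For example (illustrative):
///   sext (icmp slt i32 %x, 0) to i32  -->  ashr i32 %x, 31
/// which is all-ones when %x is negative and zero otherwise.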
Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
                                                 Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
      }
    }
  }

  return nullptr;
}
/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the common
/// low bits.  This is used by code that tries to promote integer operations to
/// a wider type when the promotion will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
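///
/// For example (illustrative): 'add i16 (trunc i32 %x to i16), 42' can be
/// evaluated at i32 as 'add i32 %x, 42'; the low 16 bits are unchanged, and
/// the caller corrects the high bits if there are not enough sign bits.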
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn sext(trunc(x))
  // into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of a same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension.  Since we don't, look deeper and check
  // for a truncate.  If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_Constant(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    Constant *SizeDiff = ConstantInt::get(CA->getType(), SrcDstSize - MidSize);
    Constant *ShAmt = ConstantExpr::getAdd(CA, SizeDiff);
    Constant *ShAmtExt = ConstantExpr::getSExt(ShAmt, CI.getType());
    A = Builder.CreateShl(A, ShAmtExt, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtExt);
  }

  return nullptr;
}
/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}
1532 static Type *shrinkFPConstant(ConstantFP *CFP) {
1533 if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
1534 return nullptr; // No constant folding of this.
1535 // See if the value can be truncated to half and then reextended.
1536 if (fitsInFPType(CFP, APFloat::IEEEhalf()))
1537 return Type::getHalfTy(CFP->getContext());
1538 // See if the value can be truncated to float and then reextended.
1539 if (fitsInFPType(CFP, APFloat::IEEEsingle()))
1540 return Type::getFloatTy(CFP->getContext());
1541 if (CFP->getType()->isDoubleTy())
1542 return nullptr; // Won't shrink.
1543 if (fitsInFPType(CFP, APFloat::IEEEdouble()))
1544 return Type::getDoubleTy(CFP->getContext());
1545 // Don't try to shrink to various long double types.
1546 return nullptr;
1547 }
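// Illustrative example (editor's addition, not from the original source):
// the double constant 2.0 survives a round trip through IEEEhalf, so it is
// shrunk to the half type; 0.1 is inexact in every smaller format, so
// nullptr is returned for it.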
1549 // Determine if this is a vector of ConstantFPs and if so, return the minimal
1550 // type we can safely truncate all elements to.
1551 // TODO: Make these support undef elements.
1552 static Type *shrinkFPConstantVector(Value *V) {
1553 auto *CV = dyn_cast<Constant>(V);
1554 auto *CVVTy = dyn_cast<VectorType>(V->getType());
1555 if (!CV || !CVVTy)
1556 return nullptr;
1558 Type *MinType = nullptr;
1560 unsigned NumElts = cast<FixedVectorType>(CVVTy)->getNumElements();
1561 for (unsigned i = 0; i != NumElts; ++i) {
1562 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
1563 if (!CFP)
1564 return nullptr;
1566 Type *T = shrinkFPConstant(CFP);
1567 if (!T)
1568 return nullptr;
1570 // If we haven't found a type yet or this type has a larger mantissa than
1571 // our previous type, this is our new minimal type.
1572 if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
1573 MinType = T;
1574 }
1576 // Make a vector type from the minimal type.
1577 return FixedVectorType::get(MinType, NumElts);
1578 }
1580 /// Find the minimum FP type we can safely truncate to.
1581 static Type *getMinimumFPType(Value *V) {
1582 if (auto *FPExt = dyn_cast<FPExtInst>(V))
1583 return FPExt->getOperand(0)->getType();
1585 // If this value is a constant, return the constant in the smallest FP type
1586 // that can accurately represent it. This allows us to turn
1587 // (float)((double)X+2.0) into x+2.0f.
1588 if (auto *CFP = dyn_cast<ConstantFP>(V))
1589 if (Type *T = shrinkFPConstant(CFP))
1590 return T;
1592 // Try to shrink a vector of FP constants.
1593 if (Type *T = shrinkFPConstantVector(V))
1594 return T;
1596 return V->getType();
1597 }
1599 /// Return true if the cast from integer to FP can be proven to be exact for all
1600 /// possible inputs (the conversion does not lose any precision).
1601 static bool isKnownExactCastIntToFP(CastInst &I) {
1602 CastInst::CastOps Opcode = I.getOpcode();
1603 assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
1604 "Unexpected cast");
1605 Value *Src = I.getOperand(0);
1606 Type *SrcTy = Src->getType();
1607 Type *FPTy = I.getType();
1608 bool IsSigned = Opcode == Instruction::SIToFP;
1609 int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;
1611 // Easy case - if the source integer type has less bits than the FP mantissa,
1612 // then the cast must be exact.
1613 int DestNumSigBits = FPTy->getFPMantissaWidth();
1614 if (SrcSize <= DestNumSigBits)
1615 return true;
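// Illustrative example (editor's addition, not from the original source):
// sitofp i32 -> double is always exact because a 31-bit magnitude fits in
// double's 53-bit significand, whereas sitofp i64 -> float (63 > 24) is not.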
1617 // Cast from FP to integer and back to FP is independent of the intermediate
1618 // integer width because of poison on overflow.
1619 Value *F;
1620 if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
1621 // If this is uitofp (fptosi F), the source needs an extra bit to avoid
1622 // potential rounding of negative FP input values.
1623 int SrcNumSigBits = F->getType()->getFPMantissaWidth();
1624 if (!IsSigned && match(Src, m_FPToSI(m_Value())))
1625 ++SrcNumSigBits;
1627 // [su]itofp (fpto[su]i F) --> exact if the source type has no more
1628 // significant bits than the destination (and make sure neither type is
1629 // weird -- ppc_fp128).
1630 if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
1631 SrcNumSigBits <= DestNumSigBits)
1632 return true;
1633 }
1635 // TODO:
1636 // Try harder to find if the source integer type has fewer significant bits.
1637 // For example, compute number of sign bits or compute low bit mask.
1638 return false;
1639 }
1641 Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
1642 if (Instruction *I = commonCastTransforms(FPT))
1643 return I;
1645 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1646 // simplify this expression to avoid one or more of the trunc/extend
1647 // operations if we can do so without changing the numerical results.
1649 // The exact manner in which the widths of the operands interact to limit
1650 // what we can and cannot do safely varies from operation to operation, and
1651 // is explained below in the various case statements.
1652 Type *Ty = FPT.getType();
1653 auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1654 if (BO && BO->hasOneUse()) {
1655 Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
1656 Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
1657 unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1658 unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1659 unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1660 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1661 unsigned DstWidth = Ty->getFPMantissaWidth();
1662 switch (BO->getOpcode()) {
1663 default: break;
1664 case Instruction::FAdd:
1665 case Instruction::FSub:
1666 // For addition and subtraction, the infinitely precise result can
1667 // essentially be arbitrarily wide; proving that double rounding
1668 // will not occur because the result of OpI is exact (as we will for
1669 // FMul, for example) is hopeless. However, we *can* nonetheless
1670 // frequently know that double rounding cannot occur (or that it is
1671 // innocuous) by taking advantage of the specific structure of
1672 // infinitely-precise results that admit double rounding.
1674 // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1675 // to represent both sources, we can guarantee that the double
1676 // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1677 // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1678 // for proof of this fact).
1680 // Note: Figueroa does not consider the case where DstFormat !=
1681 // SrcFormat. It's possible (likely even!) that this analysis
1682 // could be tightened for those cases, but they are rare (the main
1683 // case of interest here is (float)((double)float + float)).
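// Editor's note (not from the original source), with concrete numbers for
// that main case: OpWidth = 53 (double) and DstWidth = 24 (float), and
// 53 >= 2*24+1, so the add can be performed directly in float whenever
// both source values fit in float.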
1684 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1685 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1686 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1687 Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
1688 RI->copyFastMathFlags(BO);
1689 return RI;
1690 }
1691 break;
1692 case Instruction::FMul:
1693 // For multiplication, the infinitely precise result has at most
1694 // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1695 // that such a value can be exactly represented, then no double
1696 // rounding can possibly occur; we can safely perform the operation
1697 // in the destination format if it can represent both sources.
1698 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1699 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1700 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1701 return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
1702 }
1703 break;
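// Editor's note (not from the original source): for two float sources
// multiplied in double, OpWidth = 53 >= 24 + 24 = LHSWidth + RHSWidth, so
// the product is exact and truncating it back to float rounds only once.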
1704 case Instruction::FDiv:
1705 // For division, we again use the bound from Figueroa's
1706 // dissertation. I am entirely certain that this bound can be
1707 // tightened in the unbalanced operand case by an analysis based on
1708 // the diophantine rational approximation bound, but the well-known
1709 // condition used here is a good conservative first pass.
1710 // TODO: Tighten bound via rigorous analysis of the unbalanced case.
1711 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1712 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1713 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1714 return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
1715 }
1716 break;
1717 case Instruction::FRem: {
1718 // Remainder is straightforward. Remainder is always exact, so the
1719 // type of OpI doesn't enter into things at all. We simply evaluate
1720 // in whichever source type is larger, then convert to the
1721 // destination type.
1722 if (SrcWidth == OpWidth)
1723 break;
1724 Value *LHS, *RHS;
1725 if (LHSWidth == SrcWidth) {
1726 LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
1727 RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
1728 } else {
1729 LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
1730 RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
1731 }
1733 Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
1734 return CastInst::CreateFPCast(ExactResult, Ty);
1735 }
1736 }
1737 }
1739 // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1740 Value *X;
1741 Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
1742 if (Op && Op->hasOneUse()) {
1743 // FIXME: The FMF should propagate from the fptrunc, not the source op.
1744 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1745 if (isa<FPMathOperator>(Op))
1746 Builder.setFastMathFlags(Op->getFastMathFlags());
1748 if (match(Op, m_FNeg(m_Value(X)))) {
1749 Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
1751 return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
1752 }
1754 // If we are truncating a select that has an extended operand, we can
1755 // narrow the other operand and do the select as a narrow op.
1756 Value *Cond, *X, *Y;
1757 if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
1758 X->getType() == Ty) {
1759 // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
1760 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1761 Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
1762 return replaceInstUsesWith(FPT, Sel);
1763 }
1764 if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
1765 X->getType() == Ty) {
1766 // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
1767 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1768 Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
1769 return replaceInstUsesWith(FPT, Sel);
1770 }
1771 }
1773 if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
1774 switch (II->getIntrinsicID()) {
1775 default: break;
1776 case Intrinsic::ceil:
1777 case Intrinsic::fabs:
1778 case Intrinsic::floor:
1779 case Intrinsic::nearbyint:
1780 case Intrinsic::rint:
1781 case Intrinsic::round:
1782 case Intrinsic::roundeven:
1783 case Intrinsic::trunc: {
1784 Value *Src = II->getArgOperand(0);
1785 if (!Src->hasOneUse())
1786 break;
1788 // Except for fabs, this transformation requires the input of the unary FP
1789 // operation to be itself an fpext from the type to which we're
1790 // truncating.
1791 if (II->getIntrinsicID() != Intrinsic::fabs) {
1792 FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
1793 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1794 break;
1795 }
1797 // Do unary FP operation on smaller type.
1798 // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1799 Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1800 Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
1801 II->getIntrinsicID(), Ty);
1802 SmallVector<OperandBundleDef, 1> OpBundles;
1803 II->getOperandBundlesAsDefs(OpBundles);
1804 CallInst *NewCI =
1805 CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
1806 NewCI->copyFastMathFlags(II);
1807 return NewCI;
1808 }
1809 }
1810 }
1812 if (Instruction *I = shrinkInsertElt(FPT, Builder))
1813 return I;
1815 Value *Src = FPT.getOperand(0);
1816 if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1817 auto *FPCast = cast<CastInst>(Src);
1818 if (isKnownExactCastIntToFP(*FPCast))
1819 return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1820 }
1822 return commonCastTransforms(FPT);
1823 }
1825 Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
1826 // If the source operand is a cast from integer to FP and known exact, then
1827 // cast the integer operand directly to the destination type.
1828 Type *Ty = FPExt.getType();
1829 Value *Src = FPExt.getOperand(0);
1830 if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1831 auto *FPCast = cast<CastInst>(Src);
1832 if (isKnownExactCastIntToFP(*FPCast))
1833 return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1834 }
1836 return commonCastTransforms(FPExt);
1837 }
1839 /// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1840 /// This is safe if the intermediate type has enough bits in its mantissa to
1841 /// accurately represent all values of X. For example, this won't work with
1842 /// i64 -> float -> i64.
1843 Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
1844 if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1845 return nullptr;
1847 auto *OpI = cast<CastInst>(FI.getOperand(0));
1848 Value *X = OpI->getOperand(0);
1849 Type *XType = X->getType();
1850 Type *DestType = FI.getType();
1851 bool IsOutputSigned = isa<FPToSIInst>(FI);
1853 // Since we can assume the conversion won't overflow, our decision as to
1854 // whether the input will fit in the float should depend on the minimum
1855 // of the input range and output range.
1857 // This means this is also safe for a signed input and unsigned output, since
1858 // a negative input would lead to undefined behavior.
1859 if (!isKnownExactCastIntToFP(*OpI)) {
1860 // The first cast may not round exactly based on the source integer width
1861 // and FP width, but the overflow UB rules can still allow this to fold.
1862 // If the destination type is narrow, that means the intermediate FP value
1863 // must be large enough to hold the source value exactly.
1864 // For example, (uint8_t)(float)(uint32_t)16777217 is undefined behavior.
1865 int OutputSize = (int)DestType->getScalarSizeInBits() - IsOutputSigned;
1866 if (OutputSize > OpI->getType()->getFPMantissaWidth())
1867 return nullptr;
1868 }
1870 if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
1871 bool IsInputSigned = isa<SIToFPInst>(OpI);
1872 if (IsInputSigned && IsOutputSigned)
1873 return new SExtInst(X, DestType);
1874 return new ZExtInst(X, DestType);
1875 }
1876 if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
1877 return new TruncInst(X, DestType);
1879 assert(XType == DestType && "Unexpected types for int to FP to int casts");
1880 return replaceInstUsesWith(FI, X);
1881 }
1883 Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
1884 if (Instruction *I = foldItoFPtoI(FI))
1885 return I;
1887 return commonCastTransforms(FI);
1888 }
1890 Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
1891 if (Instruction *I = foldItoFPtoI(FI))
1892 return I;
1894 return commonCastTransforms(FI);
1895 }
1897 Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
1898 return commonCastTransforms(CI);
1899 }
1901 Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
1902 return commonCastTransforms(CI);
1903 }
1905 Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
1906 // If the source integer type is not the intptr_t type for this target, do a
1907 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
1908 // cast to be exposed to other transforms.
1909 unsigned AS = CI.getAddressSpace();
1910 if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
1911 DL.getPointerSizeInBits(AS)) {
1912 Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
1913 // Handle vectors of pointers.
1914 if (auto *CIVTy = dyn_cast<VectorType>(CI.getType()))
1915 Ty = VectorType::get(Ty, CIVTy->getElementCount());
1917 Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
1918 return new IntToPtrInst(P, CI.getType());
1919 }
1921 if (Instruction *I = commonCastTransforms(CI))
1922 return I;
1924 return nullptr;
1925 }
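// Illustrative example (editor's addition, not from the original source),
// assuming a target with 64-bit pointers:
//   %p = inttoptr i32 %x to i8*
// becomes
//   %w = zext i32 %x to i64
//   %p = inttoptr i64 %w to i8*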
1927 /// Implement the transforms for cast of pointer (bitcast/ptrtoint)
1928 Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
1929 Value *Src = CI.getOperand(0);
1931 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
1932 // If casting the result of a getelementptr instruction with no offset, turn
1933 // this into a cast of the original pointer!
1934 if (GEP->hasAllZeroIndices() &&
1935 // If CI is an addrspacecast and GEP changes the pointer type, merging
1936 // GEP into CI would undo canonicalizing addrspacecast with different
1937 // pointer types, causing infinite loops.
1938 (!isa<AddrSpaceCastInst>(CI) ||
1939 GEP->getType() == GEP->getPointerOperandType())) {
1940 // Changing the cast operand is usually not a good idea but it is safe
1941 // here because the pointer operand is being replaced with another
1942 // pointer operand so the opcode doesn't need to change.
1943 return replaceOperand(CI, 0, GEP->getOperand(0));
1944 }
1945 }
1947 return commonCastTransforms(CI);
1948 }
1950 Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
1951 // If the destination integer type is not the intptr_t type for this target,
1952 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
1953 // to be exposed to other transforms.
1954 Value *SrcOp = CI.getPointerOperand();
1955 Type *Ty = CI.getType();
1956 unsigned AS = CI.getPointerAddressSpace();
1957 unsigned TySize = Ty->getScalarSizeInBits();
1958 unsigned PtrSize = DL.getPointerSizeInBits(AS);
1959 if (TySize != PtrSize) {
1960 Type *IntPtrTy = DL.getIntPtrType(CI.getContext(), AS);
1961 if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
1962 // Handle vectors of pointers.
1963 // FIXME: what should happen for scalable vectors?
1964 IntPtrTy = FixedVectorType::get(
1965 IntPtrTy, cast<FixedVectorType>(VecTy)->getNumElements());
1966 }
1968 Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
1969 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
1970 }
1972 Value *Vec, *Scalar, *Index;
1973 if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
1974 m_Value(Scalar), m_Value(Index)))) &&
1975 Vec->getType() == Ty) {
1976 assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
1977 // Convert the scalar to int followed by insert to eliminate one cast:
1978 // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
1979 Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
1980 return InsertElementInst::Create(Vec, NewCast, Index);
1981 }
1983 return commonPointerCastTransforms(CI);
1984 }
1986 /// This input value (which is known to have vector type) is being zero extended
1987 /// or truncated to the specified vector type. Since the zext/trunc is done
1988 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern,
1989 /// endianness will impact which end of the vector is extended or
1990 /// truncated.
1992 /// A vector is always stored with index 0 at the lowest address, which
1993 /// corresponds to the most significant bits for a big endian stored integer and
1994 /// the least significant bits for little endian. A trunc/zext of an integer
1995 /// impacts the big end of the integer. Thus, we need to add/remove elements at
1996 /// the front of the vector for big endian targets, and the back of the vector
1997 /// for little endian targets.
1999 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
2001 /// The source and destination vector types may have different element types.
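/// Illustrative example (editor's addition, not from the original source):
/// bitcast <2 x i16> to i32, zext to i64, bitcast to <4 x i16> becomes a
/// shuffle of the source with a zero vector; little endian keeps the source
/// elements at indices 0-1 and zero-fills the back, big endian zero-fills
/// the front.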
2002 static Instruction *
2003 optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
2004 InstCombinerImpl &IC) {
2005 // We can only do this optimization if the output is a multiple of the input
2006 // element size, or the input is a multiple of the output element size.
2007 // Convert the input type to have the same element type as the output.
2008 VectorType *SrcTy = cast<VectorType>(InVal->getType());
2010 if (SrcTy->getElementType() != DestTy->getElementType()) {
2011 // The input types don't need to be identical, but for now they must be the
2012 // same size. There is no specific reason we couldn't handle things like
2013 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
2014 // around to it yet.
2015 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
2016 DestTy->getElementType()->getPrimitiveSizeInBits())
2017 return nullptr;
2019 SrcTy =
2020 FixedVectorType::get(DestTy->getElementType(),
2021 cast<FixedVectorType>(SrcTy)->getNumElements());
2022 InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
2023 }
2025 bool IsBigEndian = IC.getDataLayout().isBigEndian();
2026 unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
2027 unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();
2029 assert(SrcElts != DestElts && "Element counts should be different.");
2031 // Now that the element types match, get the shuffle mask and RHS of the
2032 // shuffle to use, which depends on whether we're increasing or decreasing the
2033 // size of the input.
2034 SmallVector<int, 16> ShuffleMaskStorage;
2035 ArrayRef<int> ShuffleMask;
2037 Value *V2;
2038 // Produce an identity shuffle mask for the src vector.
2039 ShuffleMaskStorage.resize(SrcElts);
2040 std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);
2042 if (SrcElts > DestElts) {
2043 // If we're shrinking the number of elements (rewriting an integer
2044 // truncate), just shuffle in the elements corresponding to the least
2045 // significant bits from the input and use undef as the second shuffle
2046 // input.
2047 V2 = UndefValue::get(SrcTy);
2048 // Make sure the shuffle mask selects the "least significant bits" by
2049 // keeping elements from the back of the src vector for big endian, and from
2050 // the front for little endian.
2051 ShuffleMask = ShuffleMaskStorage;
2052 if (IsBigEndian)
2053 ShuffleMask = ShuffleMask.take_back(DestElts);
2054 else
2055 ShuffleMask = ShuffleMask.take_front(DestElts);
2056 } else {
2057 // If we're increasing the number of elements (rewriting an integer zext),
2058 // shuffle in all of the elements from InVal. Fill the rest of the result
2059 // elements with zeros from a constant zero.
2060 V2 = Constant::getNullValue(SrcTy);
2061 // Use first elt from V2 when indicating zero in the shuffle mask.
2062 uint32_t NullElt = SrcElts;
2063 // Extend with null values in the "most significant bits" by adding elements
2064 // in front of the src vector for big endian, and at the back for little
2065 // endian.
2066 unsigned DeltaElts = DestElts - SrcElts;
2067 if (IsBigEndian)
2068 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
2069 else
2070 ShuffleMaskStorage.append(DeltaElts, NullElt);
2071 ShuffleMask = ShuffleMaskStorage;
2072 }
2074 return new ShuffleVectorInst(InVal, V2, ShuffleMask);
2075 }
2077 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
2078 return Value % Ty->getPrimitiveSizeInBits() == 0;
2079 }
2081 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
2082 return Value / Ty->getPrimitiveSizeInBits();
2083 }
2085 /// V is a value which is inserted into a vector of VecEltTy.
2086 /// Look through the value to see if we can decompose it into
2087 /// insertions into the vector. See the example in the comment for
2088 /// OptimizeIntegerToVectorInsertions for the pattern this handles.
2089 /// The type of V is always a non-zero multiple of VecEltTy's size.
2090 /// Shift is the number of bits between the lsb of V and the lsb of
2091 /// the vector element.
2093 /// This returns false if the pattern can't be matched or true if it can,
2094 /// filling in Elements with the elements found here.
2095 static bool collectInsertionElements(Value *V, unsigned Shift,
2096 SmallVectorImpl<Value *> &Elements,
2097 Type *VecEltTy, bool isBigEndian) {
2098 assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
2099 "Shift should be a multiple of the element type size");
2101 // Undef values never contribute useful bits to the result.
2102 if (isa<UndefValue>(V)) return true;
2104 // If we got down to a value of the right type, we win, try inserting into the
2105 // right element.
2106 if (V->getType() == VecEltTy) {
2107 // Inserting null doesn't actually insert any elements.
2108 if (Constant *C = dyn_cast<Constant>(V))
2109 if (C->isNullValue())
2110 return true;
2112 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
2113 if (isBigEndian)
2114 ElementIndex = Elements.size() - ElementIndex - 1;
2116 // Fail if multiple elements are inserted into this slot.
2117 if (Elements[ElementIndex])
2118 return false;
2120 Elements[ElementIndex] = V;
2121 return true;
2122 }
2124 if (Constant *C = dyn_cast<Constant>(V)) {
2125 // Figure out the # elements this provides, and bitcast it or slice it up
2126 // as required.
2127 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
2128 VecEltTy);
2129 // If the constant is the size of a vector element, we just need to bitcast
2130 // it to the right type so it gets properly inserted.
2131 if (NumElts == 1)
2132 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
2133 Shift, Elements, VecEltTy, isBigEndian);
2135 // Okay, this is a constant that covers multiple elements. Slice it up into
2136 // pieces and insert each element-sized piece into the vector.
2137 if (!isa<IntegerType>(C->getType()))
2138 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
2139 C->getType()->getPrimitiveSizeInBits()));
2140 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
2141 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
2143 for (unsigned i = 0; i != NumElts; ++i) {
2144 unsigned ShiftI = Shift+i*ElementSize;
2145 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
2146 ShiftI));
2147 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
2148 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
2149 isBigEndian))
2150 return false;
2151 }
2152 return true;
2153 }
2155 if (!V->hasOneUse()) return false;
2157 Instruction *I = dyn_cast<Instruction>(V);
2158 if (!I) return false;
2159 switch (I->getOpcode()) {
2160 default: return false; // Unhandled case.
2161 case Instruction::BitCast:
2162 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2163 isBigEndian);
2164 case Instruction::ZExt:
2165 if (!isMultipleOfTypeSize(
2166 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2167 VecEltTy))
2168 return false;
2169 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2170 isBigEndian);
2171 case Instruction::Or:
2172 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2173 isBigEndian) &&
2174 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
2175 isBigEndian);
2176 case Instruction::Shl: {
2177 // Must be shifting by a constant that is a multiple of the element size.
2178 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
2179 if (!CI) return false;
2180 Shift += CI->getZExtValue();
2181 if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
2182 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2183 isBigEndian);
2184 }
2185 }
2186 }
2190 /// If the input is an 'or' instruction, we may be doing shifts and ors to
2191 /// assemble the elements of the vector manually.
2192 /// Try to rip the code out and replace it with insertelements. This is to
2193 /// optimize code like this:
2195 /// %tmp37 = bitcast float %inc to i32
2196 /// %tmp38 = zext i32 %tmp37 to i64
2197 /// %tmp31 = bitcast float %inc5 to i32
2198 /// %tmp32 = zext i32 %tmp31 to i64
2199 /// %tmp33 = shl i64 %tmp32, 32
2200 /// %ins35 = or i64 %tmp33, %tmp38
2201 /// %tmp43 = bitcast i64 %ins35 to <2 x float>
2203 /// Into two insertelements that do "buildvector{%inc, %inc5}".
2204 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
2205 InstCombinerImpl &IC) {
2206 auto *DestVecTy = cast<FixedVectorType>(CI.getType());
2207 Value *IntInput = CI.getOperand(0);
2209 SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
2210 if (!collectInsertionElements(IntInput, 0, Elements,
2211 DestVecTy->getElementType(),
2212 IC.getDataLayout().isBigEndian()))
2213 return nullptr;
2215 // If we succeeded, we know that all of the elements are specified by Elements
2216 // or are zero if Elements has a null entry. Recast this as a set of
2217 // insertions.
2218 Value *Result = Constant::getNullValue(CI.getType());
2219 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2220 if (!Elements[i]) continue; // Unset element.
2222 Result = IC.Builder.CreateInsertElement(Result, Elements[i],
2223 IC.Builder.getInt32(i));
2224 }
2226 return Result;
2227 }
2229 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2230 /// vector followed by extract element. The backend tends to handle bitcasts of
2231 /// vectors better than bitcasts of scalars because vector registers are
2232 /// usually not type-specific like scalar integer or scalar floating-point.
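/// Illustrative example (editor's addition, not from the original source):
///   %e = extractelement <2 x i32> %v, i32 0
///   %f = bitcast i32 %e to float
/// becomes
///   %bc = bitcast <2 x i32> %v to <2 x float>
///   %f = extractelement <2 x float> %bc, i32 0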
2233 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
2234 InstCombinerImpl &IC) {
2235 // TODO: Create and use a pattern matcher for ExtractElementInst.
2236 auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
2237 if (!ExtElt || !ExtElt->hasOneUse())
2238 return nullptr;
2240 // The bitcast must be to a vectorizable type, otherwise we can't make a new
2241 // type to extract from.
2242 Type *DestType = BitCast.getType();
2243 if (!VectorType::isValidElementType(DestType))
2244 return nullptr;
2246 auto *NewVecType = VectorType::get(DestType, ExtElt->getVectorOperandType());
2247 auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
2248 NewVecType, "bc");
2249 return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
2250 }
2252 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
2253 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
2254 InstCombiner::BuilderTy &Builder) {
2255 Type *DestTy = BitCast.getType();
2256 BinaryOperator *BO;
2257 if (!DestTy->isIntOrIntVectorTy() ||
2258 !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2259 !BO->isBitwiseLogicOp())
2260 return nullptr;
2262 // FIXME: This transform is restricted to vector types to avoid backend
2263 // problems caused by creating potentially illegal operations. If a fix-up is
2264 // added to handle that situation, we can remove this check.
2265 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2266 return nullptr;
2268 Value *X;
2269 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
2270 X->getType() == DestTy && !isa<Constant>(X)) {
2271 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
2272 Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
2273 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
2274 }
2276 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
2277 X->getType() == DestTy && !isa<Constant>(X)) {
2278 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
2279 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2280 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
2281 }
2283 // Canonicalize vector bitcasts to come before vector bitwise logic with a
2284 // constant. This eases recognition of special constants for later ops.
2286 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
2287 Constant *C;
2288 if (match(BO->getOperand(1), m_Constant(C))) {
2289 // bitcast (logic X, C) --> logic (bitcast X, C')
2290 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2291 Value *CastedC = Builder.CreateBitCast(C, DestTy);
2292 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
2293 }
2295 return nullptr;
2296 }
2298 /// Change the type of a select if we can eliminate a bitcast.
2299 static Instruction *foldBitCastSelect(BitCastInst &BitCast,
2300 InstCombiner::BuilderTy &Builder) {
2301 Value *Cond, *TVal, *FVal;
2302 if (!match(BitCast.getOperand(0),
2303 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
2304 return nullptr;
2306 // A vector select must maintain the same number of elements in its operands.
2307 Type *CondTy = Cond->getType();
2308 Type *DestTy = BitCast.getType();
2309 if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
2310 if (!DestTy->isVectorTy())
2311 return nullptr;
2312 if (cast<FixedVectorType>(DestTy)->getNumElements() !=
2313 cast<FixedVectorType>(CondVTy)->getNumElements())
2314 return nullptr;
2315 }
2317 // FIXME: This transform is restricted from changing the select between
2318 // scalars and vectors to avoid backend problems caused by creating
2319 // potentially illegal operations. If a fix-up is added to handle that
2320 // situation, we can remove this check.
2321 if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2322 return nullptr;
2324 auto *Sel = cast<Instruction>(BitCast.getOperand(0));
2325 Value *X;
2326 if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2327 !isa<Constant>(X)) {
2328 // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2329 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2330 return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2331 }
2333 if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2334 !isa<Constant>(X)) {
2335 // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2336 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2337 return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2338 }
2340 return nullptr;
2341 }
2343 /// Check if all users of CI are StoreInsts.
2344 static bool hasStoreUsersOnly(CastInst &CI) {
2345 for (User *U : CI.users()) {
2346 if (!isa<StoreInst>(U))
2347 return false;
2348 }
2349 return true;
2350 }
2352 /// This function handles the following case
2353 ///
2354 ///     A  ->  B    cast
2355 ///     PHI
2356 ///     B  ->  A    cast
2357 ///
2358 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2359 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
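/// Illustrative example (editor's addition, not from the original source):
///   %xb  = bitcast float %x to i32
///   %yb  = bitcast float %y to i32
///   %phi = phi i32 [ %xb, %bb0 ], [ %yb, %bb1 ]
///   %r   = bitcast i32 %phi to float
/// can be rewritten as a float PHI of %x and %y, eliminating all three
/// bitcasts.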
2360 Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
2361 PHINode *PN) {
2362 // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2363 if (hasStoreUsersOnly(CI))
2364 return nullptr;
2366 Value *Src = CI.getOperand(0);
2367 Type *SrcTy = Src->getType(); // Type B
2368 Type *DestTy = CI.getType(); // Type A
2370 SmallVector<PHINode *, 4> PhiWorklist;
2371 SmallSetVector<PHINode *, 4> OldPhiNodes;
2373 // Find all of the A->B casts and PHI nodes.
2374 // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2375 // OldPhiNodes is used to track all known PHI nodes, before adding a new
2376 // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2377 PhiWorklist.push_back(PN);
2378 OldPhiNodes.insert(PN);
2379 while (!PhiWorklist.empty()) {
2380 auto *OldPN = PhiWorklist.pop_back_val();
2381 for (Value *IncValue : OldPN->incoming_values()) {
2382 if (isa<Constant>(IncValue))
2383 continue;
2385 if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
2386 // If there is a sequence of one or more load instructions, where each
2387 // loaded value is used as the address of a later load, a bitcast is
2388 // necessary to change the value type; don't optimize that case. For
2389 // simplicity we give up if the load address comes from another load.
2390 Value *Addr = LI->getOperand(0);
2391 if (Addr == &CI || isa<LoadInst>(Addr))
2392 return nullptr;
2393 if (LI->hasOneUse() && LI->isSimple())
2394 continue;
2395 // If a LoadInst has more than one use, changing the type of loaded
2396 // value may create another bitcast.
2397 return nullptr;
2398 }
2400 if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
2401 if (OldPhiNodes.insert(PNode))
2402 PhiWorklist.push_back(PNode);
2403 continue;
2404 }
2406 auto *BCI = dyn_cast<BitCastInst>(IncValue);
2407 // We can't handle other instructions.
2408 if (!BCI)
2409 return nullptr;
2411 // Verify it's an A->B cast.
2412 Type *TyA = BCI->getOperand(0)->getType();
2413 Type *TyB = BCI->getType();
2414 if (TyA != DestTy || TyB != SrcTy)
2415 return nullptr;
2416 }
2417 }
2419 // Check that each user of each old PHI node is something that we can
2420 // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
2421 for (auto *OldPN : OldPhiNodes) {
2422 for (User *V : OldPN->users()) {
2423 if (auto *SI = dyn_cast<StoreInst>(V)) {
2424 if (!SI->isSimple() || SI->getOperand(0) != OldPN)
2425 return nullptr;
2426 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2427 // Verify it's a B->A cast.
2428 Type *TyB = BCI->getOperand(0)->getType();
2429 Type *TyA = BCI->getType();
2430 if (TyA != DestTy || TyB != SrcTy)
2431 return nullptr;
2432 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2433 // As long as the user is another old PHI node, then even if we don't
2434 // rewrite it, the PHI web we're considering won't have any users
2435 // outside itself, so it'll be dead.
2436 if (OldPhiNodes.count(PHI) == 0)
2437 return nullptr;
2438 } else {
2439 return nullptr;
2440 }
2441 }
2442 }
2444 // For each old PHI node, create a corresponding new PHI node with a type A.
2445 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2446 for (auto *OldPN : OldPhiNodes) {
2447 Builder.SetInsertPoint(OldPN);
2448 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2449 NewPNodes[OldPN] = NewPN;
2450 }
2452 // Fill in the operands of new PHI nodes.
2453 for (auto *OldPN : OldPhiNodes) {
2454 PHINode *NewPN = NewPNodes[OldPN];
2455 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2456 Value *V = OldPN->getOperand(j);
2457 Value *NewV = nullptr;
2458 if (auto *C = dyn_cast<Constant>(V)) {
2459 NewV = ConstantExpr::getBitCast(C, DestTy);
2460 } else if (auto *LI = dyn_cast<LoadInst>(V)) {
2461 // Explicitly perform load combine to make sure no opposing transform
2462 // can remove the bitcast in the meantime and trigger an infinite loop.
2463 Builder.SetInsertPoint(LI);
2464 NewV = combineLoadToNewType(*LI, DestTy);
2465 // Remove the old load and its use in the old phi, which itself becomes
2466 // dead once the whole transform finishes.
2467 replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
2468 eraseInstFromFunction(*LI);
2469 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2470 NewV = BCI->getOperand(0);
2471 } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
2472 NewV = NewPNodes[PrevPN];
2473 }
2474 assert(NewV);
2475 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2476 }
2477 }
2479 // Traverse all accumulated PHI nodes and process their users,
2480 // which are Stores and BitCasts. Without this processing
2481 // NewPHI nodes could be replicated and could lead to extra
2482 // moves generated after DeSSA.
2483 // If there is a store with type B, change it to type A.
2486 // Replace users of BitCast B->A with NewPHI. These will help
2487 // later to get rid of a closure formed by OldPHI nodes.
2488 Instruction *RetVal = nullptr;
2489 for (auto *OldPN : OldPhiNodes) {
2490 PHINode *NewPN = NewPNodes[OldPN];
2491 for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End; ) {
2492 User *V = *It;
2493 // We may remove this user, advance to avoid iterator invalidation.
2494 ++It;
2495 if (auto *SI = dyn_cast<StoreInst>(V)) {
2496 assert(SI->isSimple() && SI->getOperand(0) == OldPN);
2497 Builder.SetInsertPoint(SI);
2498 auto *NewBC =
2499 cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
2500 SI->setOperand(0, NewBC);
2501 Worklist.push(SI);
2502 assert(hasStoreUsersOnly(*NewBC));
2503 }
2504 else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2505 Type *TyB = BCI->getOperand(0)->getType();
2506 Type *TyA = BCI->getType();
2507 assert(TyA == DestTy && TyB == SrcTy);
2508 (void) TyA;
2509 (void) TyB;
2510 Instruction *I = replaceInstUsesWith(*BCI, NewPN);
2511 if (BCI == &CI)
2512 RetVal = I;
2513 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2514 assert(OldPhiNodes.count(PHI) > 0);
2515 (void) PHI;
2516 } else {
2517 llvm_unreachable("all uses should be handled");
2518 }
2519 }
2520 }
2522 return RetVal;
2523 }
2525 Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
2526 // If the operands are integer typed then apply the integer transforms,
2527 // otherwise just apply the common ones.
2528 Value *Src = CI.getOperand(0);
2529 Type *SrcTy = Src->getType();
2530 Type *DestTy = CI.getType();
2532 // Get rid of casts from one type to the same type. These are useless and can
2533 // be replaced by the operand.
2534 if (DestTy == Src->getType())
2535 return replaceInstUsesWith(CI, Src);
2537 if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
2538 PointerType *SrcPTy = cast<PointerType>(SrcTy);
2539 PointerType *DstPTy = cast<PointerType>(DestTy);
2540 Type *DstElTy = DstPTy->getElementType();
2541 Type *SrcElTy = SrcPTy->getElementType();
2543 // Casting pointers between the same type, but with different address spaces
2544 // is an addrspace cast rather than a bitcast.
2545 if ((DstElTy == SrcElTy) &&
2546 (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
2547 return new AddrSpaceCastInst(Src, DestTy);
2549 // If we are casting an alloca to a pointer to a type of the same
2550 // size, rewrite the allocation instruction to allocate the "right" type.
2551 // There is no need to modify malloc calls because it is their bitcast that
2552 // needs to be cleaned up.
2553 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
2554 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
2555 return V;
2557 // When the type pointed to is not sized the cast cannot be
2558 // turned into a gep.
2559 Type *PointeeType =
2560 cast<PointerType>(Src->getType()->getScalarType())->getElementType();
2561 if (!PointeeType->isSized())
2562 return nullptr;
2564 // If the source and destination are pointers, and this cast is equivalent
2565 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
2566 // This can enhance SROA and other transforms that want type-safe pointers.
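// Illustrative example (editor's addition, not from the original source):
//   bitcast [10 x i32]* %p to i32*
// becomes
//   getelementptr [10 x i32], [10 x i32]* %p, i32 0, i32 0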
2567 unsigned NumZeros = 0;
2568 while (SrcElTy && SrcElTy != DstElTy) {
2569 SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
2570 ++NumZeros;
2571 }
2573 // If we found a path from the src to dest, create the getelementptr now.
2574 if (SrcElTy == DstElTy) {
2575 SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
2576 GetElementPtrInst *GEP =
2577 GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
2579 // If the source pointer is dereferenceable, then assume it points to an
2580 // allocated object and apply "inbounds" to the GEP.
2581 bool CanBeNull;
2582 if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
2583 // In a non-default address space (not 0), a null pointer can not be
2584 // assumed inbounds, so ignore that case (dereferenceable_or_null).
2585 // The reason is that 'null' is not treated differently in these address
2586 // spaces, and we consequently ignore the 'gep inbounds' special case
2587 // for 'null' which allows 'inbounds' on 'null' if the indices are
2588 // zeros.
2589 if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
2590 GEP->setIsInBounds();
2591 }
2592 return GEP;
2593 }
2594 }
2596 if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
2597 // Beware: messing with this target-specific oddity may cause trouble.
2598 if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
2599 Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
2600 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
2601 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2602 }
2604 if (isa<IntegerType>(SrcTy)) {
2605 // If this is a cast from an integer to vector, check to see if the input
2606 // is a trunc or zext of a bitcast from vector. If so, we can replace all
2607 // the casts with a shuffle and (potentially) a bitcast.
2608 if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
2609 CastInst *SrcCast = cast<CastInst>(Src);
2610 if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
2611 if (isa<VectorType>(BCIn->getOperand(0)->getType()))
2612 if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
2613 BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
2614 return I;
2615 }
2617 // If the input is an 'or' instruction, we may be doing shifts and ors to
2618 // assemble the elements of the vector manually. Try to rip the code out
2619 // and replace it with insertelements.
2620 if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
2621 return replaceInstUsesWith(CI, V);
2622 }
2623 }
2625 if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
2626 if (SrcVTy->getNumElements() == 1) {
2627 // If our destination is not a vector, then make this a straight
2628 // scalar-scalar cast.
2629 if (!DestTy->isVectorTy()) {
2630 Value *Elem =
2631 Builder.CreateExtractElement(Src,
2632 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2633 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
2634 }
2636 // Otherwise, see if our source is an insert. If so, then use the scalar
2637 // component directly:
2638 // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
2639 if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
2640 return new BitCastInst(InsElt->getOperand(1), DestTy);
2641 }
2642 }
2644 if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
2645 // Okay, we have (bitcast (shuffle ..)). Check to see if this is
2646 // a bitcast to a vector with the same # elts.
2647 Value *ShufOp0 = Shuf->getOperand(0);
2648 Value *ShufOp1 = Shuf->getOperand(1);
2649 unsigned NumShufElts =
2650 cast<FixedVectorType>(Shuf->getType())->getNumElements();
2651 unsigned NumSrcVecElts =
2652 cast<FixedVectorType>(ShufOp0->getType())->getNumElements();
2653 if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
2654 cast<FixedVectorType>(DestTy)->getNumElements() == NumShufElts &&
2655 NumShufElts == NumSrcVecElts) {
2656 BitCastInst *Tmp;
2657 // If either of the operands is a cast from CI.getType(), then
2658 // evaluating the shuffle in the casted destination's type will allow
2659 // us to eliminate at least one cast.
2660 if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
2661 Tmp->getOperand(0)->getType() == DestTy) ||
2662 ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
2663 Tmp->getOperand(0)->getType() == DestTy)) {
2664 Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
2665 Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
2666 // Return a new shuffle vector. Use the same element ID's, as we
2667 // know the vector types match #elts.
2668 return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
2669 }
2670 }
2672 // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
2673 // a byte-swap:
2674 // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
2675 // TODO: We should match the related pattern for bitreverse.
2676 if (DestTy->isIntegerTy() &&
2677 DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
2678 SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
2679 Shuf->hasOneUse() && Shuf->isReverse()) {
2680 assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
2681 assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
2682 Function *Bswap =
2683 Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
2684 Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
2685 return IntrinsicInst::Create(Bswap, { ScalarX });
2686 }
2687 }
2689 // Handle the A->B->A cast, and there is an intervening PHI node.
2690 if (PHINode *PN = dyn_cast<PHINode>(Src))
2691 if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
2692 return I;
2694 if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
2695 return I;
2697 if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
2698 return I;
2700 if (Instruction *I = foldBitCastSelect(CI, Builder))
2701 return I;
2703 if (SrcTy->isPointerTy())
2704 return commonPointerCastTransforms(CI);
2705 return commonCastTransforms(CI);
2706 }
2708 Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
2709 // If the destination pointer element type is not the same as the source's
2710 // first do a bitcast to the destination type, and then the addrspacecast.
2711 // This allows the cast to be exposed to other transforms.
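// Illustrative example (editor's addition, not from the original source):
//   addrspacecast i32 addrspace(1)* %p to i8*
// becomes
//   %b = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
//   addrspacecast i8 addrspace(1)* %b to i8*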
2712 Value *Src = CI.getOperand(0);
2713 PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
2714 PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
2716 Type *DestElemTy = DestTy->getElementType();
2717 if (SrcTy->getElementType() != DestElemTy) {
2718 Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
2719 if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
2720 // Handle vectors of pointers.
2721 // FIXME: what should happen for scalable vectors?
2722 MidTy = FixedVectorType::get(MidTy,
2723 cast<FixedVectorType>(VT)->getNumElements());
2724 }
2726 Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
2727 return new AddrSpaceCastInst(NewBitCast, CI.getType());
2728 }
2730 return commonPointerCastTransforms(CI);
2731 }